author     Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:19:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2018-11-07 12:20:17 +0000
commit     a64a253794ac64cb40befee54db53bde17dd0d49 (patch)
tree       c1024acc5f6e508814b944d99f112259bb28b1be /collectors/python.d.plugin/rabbitmq
parent     New upstream version 1.10.0+dfsg (diff)
download   netdata-a64a253794ac64cb40befee54db53bde17dd0d49.tar.xz
           netdata-a64a253794ac64cb40befee54db53bde17dd0d49.zip

New upstream version 1.11.0+dfsg (upstream/1.11.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/python.d.plugin/rabbitmq')
-rw-r--r--   collectors/python.d.plugin/rabbitmq/Makefile.inc       |  13
-rw-r--r--   collectors/python.d.plugin/rabbitmq/README.md          |  56
-rw-r--r--   collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py  | 207
-rw-r--r--   collectors/python.d.plugin/rabbitmq/rabbitmq.conf      |  82
4 files changed, 358 insertions, 0 deletions
diff --git a/collectors/python.d.plugin/rabbitmq/Makefile.inc b/collectors/python.d.plugin/rabbitmq/Makefile.inc
new file mode 100644
index 00000000..7e67ef51
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += rabbitmq/rabbitmq.chart.py
+dist_pythonconfig_DATA += rabbitmq/rabbitmq.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += rabbitmq/README.md rabbitmq/Makefile.inc
+
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
new file mode 100644
index 00000000..22d367c4
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -0,0 +1,56 @@
+# rabbitmq
+
+This module monitors RabbitMQ performance and health metrics.
+
+The following charts are drawn:
+
+1. **Queued Messages**
+ * ready
+ * unacknowledged
+
+2. **Message Rates**
+ * ack
+ * redelivered
+ * deliver
+ * publish
+
+3. **Global Counts**
+ * channels
+ * consumers
+ * connections
+ * queues
+ * exchanges
+
+4. **File Descriptors**
+ * used descriptors
+
+5. **Socket Descriptors**
+ * used descriptors
+
+6. **Erlang processes**
+ * used processes
+
+7. **Erlang run queue**
+ * Erlang run queue
+
+8. **Memory**
+ * free memory in megabytes
+
+9. **Disk Space**
+ * free disk space in gigabytes
+
+### configuration
+
+```yaml
+local:
+  name : 'local'
+  host : '127.0.0.1'
+  port : 15672
+  user : 'guest'
+  pass : 'guest'
+
+```
+
+When no configuration file is found, the module tries to connect to `localhost:15672`.
+
+---
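Everything the README above charts comes from the RabbitMQ management HTTP API. As a rough, standalone sketch of the request the collector performs (assuming a default management plugin on `127.0.0.1:15672` with the `guest`/`guest` credentials from the example config; this is an illustration, not part of the module):

```python
# Illustrative only: fetch the same overview document the collector charts.
# Host, port and credentials are the defaults from the example config above.
import base64
import json
from urllib.request import Request, urlopen

url = 'http://127.0.0.1:15672/api/overview'
auth = base64.b64encode(b'guest:guest').decode()

req = Request(url, headers={'Authorization': 'Basic ' + auth})
with urlopen(req, timeout=2) as resp:
    overview = json.loads(resp.read().decode())

# Nested keys like these feed the "Queued Messages" and "Global Counts" charts.
print(overview['queue_totals']['messages_ready'])
print(overview['object_totals']['queues'])
```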
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
new file mode 100644
index 00000000..8298b403
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Description: rabbitmq netdata python.d module
+# Author: l2isbad
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from collections import namedtuple
+from json import loads
+from socket import gethostbyname, gaierror
+from threading import Thread
+try:
+    from queue import Queue
+except ImportError:
+    from Queue import Queue
+
+from bases.FrameworkServices.UrlService import UrlService
+
+# default module values (can be overridden per job in `config`)
+update_every = 1
+priority = 60000
+retries = 60
+
+METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
+
+NODE_STATS = [
+    'fd_used',
+    'mem_used',
+    'sockets_used',
+    'proc_used',
+    'disk_free',
+    'run_queue'
+]
+
+OVERVIEW_STATS = [
+    'object_totals.channels',
+    'object_totals.consumers',
+    'object_totals.connections',
+    'object_totals.queues',
+    'object_totals.exchanges',
+    'queue_totals.messages_ready',
+    'queue_totals.messages_unacknowledged',
+    'message_stats.ack',
+    'message_stats.redeliver',
+    'message_stats.deliver',
+    'message_stats.publish'
+]
+
+ORDER = [
+    'queued_messages',
+    'message_rates',
+    'global_counts',
+    'file_descriptors',
+    'socket_descriptors',
+    'erlang_processes',
+    'erlang_run_queue',
+    'memory',
+    'disk_space'
+]
+
+CHARTS = {
+    'file_descriptors': {
+        'options': [None, 'File Descriptors', 'descriptors', 'overview', 'rabbitmq.file_descriptors', 'line'],
+        'lines': [
+            ['fd_used', 'used', 'absolute']
+        ]
+    },
+    'memory': {
+        'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'line'],
+        'lines': [
+            ['mem_used', 'used', 'absolute', 1, 1024 << 10]
+        ]
+    },
+    'disk_space': {
+        'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'line'],
+        'lines': [
+            ['disk_free', 'free', 'absolute', 1, 1024 ** 3]
+        ]
+    },
+    'socket_descriptors': {
+        'options': [None, 'Socket Descriptors', 'descriptors', 'overview', 'rabbitmq.sockets', 'line'],
+        'lines': [
+            ['sockets_used', 'used', 'absolute']
+        ]
+    },
+    'erlang_processes': {
+        'options': [None, 'Erlang Processes', 'processes', 'overview', 'rabbitmq.processes', 'line'],
+        'lines': [
+            ['proc_used', 'used', 'absolute']
+        ]
+    },
+    'erlang_run_queue': {
+        'options': [None, 'Erlang Run Queue', 'processes', 'overview', 'rabbitmq.erlang_run_queue', 'line'],
+        'lines': [
+            ['run_queue', 'length', 'absolute']
+        ]
+    },
+    'global_counts': {
+        'options': [None, 'Global Counts', 'counts', 'overview', 'rabbitmq.global_counts', 'line'],
+        'lines': [
+            ['object_totals_channels', 'channels', 'absolute'],
+            ['object_totals_consumers', 'consumers', 'absolute'],
+            ['object_totals_connections', 'connections', 'absolute'],
+            ['object_totals_queues', 'queues', 'absolute'],
+            ['object_totals_exchanges', 'exchanges', 'absolute']
+        ]
+    },
+    'queued_messages': {
+        'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
+        'lines': [
+            ['queue_totals_messages_ready', 'ready', 'absolute'],
+            ['queue_totals_messages_unacknowledged', 'unacknowledged', 'absolute']
+        ]
+    },
+    'message_rates': {
+        'options': [None, 'Message Rates', 'messages/s', 'overview', 'rabbitmq.message_rates', 'stacked'],
+        'lines': [
+            ['message_stats_ack', 'ack', 'incremental'],
+            ['message_stats_redeliver', 'redeliver', 'incremental'],
+            ['message_stats_deliver', 'deliver', 'incremental'],
+            ['message_stats_publish', 'publish', 'incremental']
+        ]
+    }
+}
+
+
+class Service(UrlService):
+    def __init__(self, configuration=None, name=None):
+        UrlService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.host = self.configuration.get('host', '127.0.0.1')
+        self.port = self.configuration.get('port', 15672)
+        self.scheme = self.configuration.get('scheme', 'http')
+
+    def check(self):
+        # We can't start if <host> AND <port> are not specified
+        if not (self.host and self.port):
+            self.error('Host is not defined in the module configuration file')
+            return False
+
+        # Hostname -> ip address
+        try:
+            self.host = gethostbyname(self.host)
+        except gaierror as error:
+            self.error(str(error))
+            return False
+
+        # Build the base management API url
+        self.url = '{scheme}://{host}:{port}/api'.format(scheme=self.scheme,
+                                                         host=self.host,
+                                                         port=self.port)
+        # Add methods
+        api_node = self.url + '/nodes'
+        api_overview = self.url + '/overview'
+        self.methods = [METHODS(get_data=self._get_overview_stats,
+                                url=api_node,
+                                stats=NODE_STATS),
+                        METHODS(get_data=self._get_overview_stats,
+                                url=api_overview,
+                                stats=OVERVIEW_STATS)]
+        return UrlService.check(self)
+
+    def _get_data(self):
+        threads = list()
+        queue = Queue()
+        result = dict()
+
+        for method in self.methods:
+            th = Thread(target=method.get_data,
+                        args=(queue, method.url, method.stats))
+            th.start()
+            threads.append(th)
+
+        for thread in threads:
+            thread.join()
+            result.update(queue.get())
+
+        return result or None
+
+    def _get_overview_stats(self, queue, url, stats):
+        """
+        Format data received from http request
+        :return: dict
+        """
+
+        raw_data = self._get_raw_data(url)
+
+        if not raw_data:
+            return queue.put(dict())
+        data = loads(raw_data)
+        data = data[0] if isinstance(data, list) else data
+
+        to_netdata = fetch_data(raw_data=data, metrics=stats)
+        return queue.put(to_netdata)
+
+
+def fetch_data(raw_data, metrics):
+    data = dict()
+    for metric in metrics:
+        value = raw_data
+        metrics_list = metric.split('.')
+        try:
+            for m in metrics_list:
+                value = value[m]
+        except KeyError:
+            continue
+        data['_'.join(metrics_list)] = value
+    return data
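The dotted names in `NODE_STATS` and `OVERVIEW_STATS` are resolved against the nested JSON returned by `/api/nodes` and `/api/overview`, then flattened into the dimension ids used in `CHARTS` (for example `message_stats.ack` becomes `message_stats_ack`). A small standalone illustration of `fetch_data()` with a made-up payload:

```python
# Standalone copy of the fetch_data() helper above, run against a made-up payload.
def fetch_data(raw_data, metrics):
    data = dict()
    for metric in metrics:
        value = raw_data
        metrics_list = metric.split('.')
        try:
            for m in metrics_list:
                value = value[m]
        except KeyError:
            continue
        data['_'.join(metrics_list)] = value
    return data


sample = {
    'object_totals': {'queues': 4, 'exchanges': 10},
    'message_stats': {'ack': 1500, 'publish': 1512},
}
print(fetch_data(raw_data=sample, metrics=['object_totals.queues',
                                           'message_stats.ack',
                                           'message_stats.deliver']))
# {'object_totals_queues': 4, 'message_stats_ack': 1500}
# 'message_stats.deliver' is skipped silently because the key is missing.
```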
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
new file mode 100644
index 00000000..3f90da8a
--- /dev/null
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -0,0 +1,82 @@
+# netdata python.d.plugin configuration for rabbitmq
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, the rabbitmq module also supports the following:
+#
+#     host: 'ipaddress'   # Server ip address or hostname. Default: 127.0.0.1
+#     port: 'port'        # Rabbitmq port. Default: 15672
+#     scheme: 'scheme'    # http or https. Default: http
+#
+# If the URL is password protected, the following are also supported:
+#
+#     user: 'username'
+#     pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+  host: '127.0.0.1'
+  user: 'guest'
+  pass: 'guest'
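As a concrete example of the per-job options described in the comments above, a second job for a remote broker reachable over HTTPS could be added alongside `local` like this (the hostname and credentials are placeholders, not part of the shipped file):

```yaml
remote:
  name: 'remote'
  host: 'rabbit.example.com'   # placeholder hostname
  port: 15672
  scheme: 'https'
  user: 'monitoring'           # placeholder credentials
  pass: 'changeme'
  update_every: 5
```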