author     Daniel Baumann <daniel.baumann@progress-linux.org>   2021-02-07 11:45:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2021-02-07 11:45:55 +0000
commit     a8220ab2d293bb7f4b014b79d16b2fb05090fa93 (patch)
tree       77f0a30f016c0925cf7ee9292e644bba183c2774 /collectors/python.d.plugin/rabbitmq
parent     Adding upstream version 1.19.0. (diff)
Adding upstream version 1.29.0. (upstream/1.29.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/python.d.plugin/rabbitmq')
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md          |  46
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py  | 152
-rw-r--r--  collectors/python.d.plugin/rabbitmq/rabbitmq.conf      |   6
3 files changed, 197 insertions, 7 deletions
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
index 1d7ad956d..2130a7b3a 100644
--- a/collectors/python.d.plugin/rabbitmq/README.md
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -1,6 +1,13 @@
-# rabbitmq
+<!--
+title: "RabbitMQ monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rabbitmq/README.md
+sidebar_label: "RabbitMQ"
+-->
+
+# RabbitMQ monitoring with Netdata
+
+Collects message broker global and per virtual host metrics.
 
-This module monitors [RabbitMQ](https://www.rabbitmq.com/) performance and health metrics.
 
 Following charts are drawn:
 
@@ -62,7 +69,38 @@ Per Vhost charts:
   - redeliver
   - return_unroutable
 
-## configuration
+2. Per Queue charts:
+
+   1. **Queued Messages**
+
+      - messages
+      - paged_out
+      - persistent
+      - ready
+      - unacknowledged
+
+   2. **Queue Messages stats**
+
+      - ack
+      - confirm
+      - deliver
+      - get
+      - get_no_ack
+      - publish
+      - redeliver
+      - return_unroutable
+
+## Configuration
+
+Edit the `python.d/rabbitmq.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/rabbitmq.conf
+```
+
+When no configuration file is found, module tries to connect to: `localhost:15672`.
 
 ```yaml
 socket:
@@ -73,8 +111,6 @@ socket:
   pass     : 'guest'
 ```
 
-When no configuration file is found, module tries to connect to: `localhost:15672`.
-
 ---
 
 [![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Frabbitmq%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
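The collector gathers everything it charts from the RabbitMQ management HTTP API (`api/overview`, `api/nodes`, `api/vhosts` and, with this version, `api/queues`). As a standalone sketch, not part of the collector, the snippet below queries that API with the same defaults the README documents (`localhost:15672`, `guest`/`guest`) to confirm it is reachable before a job is configured:

```python
# Standalone sketch (assumed defaults, not shipped with the module): query the
# management API endpoint the collector polls and print two sections it charts.
import base64
import json
from urllib.request import Request, urlopen

URL = 'http://localhost:15672/api/overview'        # the API_OVERVIEW endpoint
AUTH = base64.b64encode(b'guest:guest').decode()   # default credentials from the sample config

request = Request(URL, headers={'Authorization': 'Basic ' + AUTH})
with urlopen(request) as response:
    overview = json.load(response)

# 'object_totals' feeds the global counts chart, 'message_stats' the message rates chart
print(json.dumps(overview.get('object_totals'), indent=2))
print(json.dumps(overview.get('message_stats'), indent=2))
```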
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
index d581c14e7..866b777f7 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.chart.py
@@ -9,6 +9,7 @@ from bases.FrameworkServices.UrlService import UrlService
 
 API_NODE = 'api/nodes'
 API_OVERVIEW = 'api/overview'
+API_QUEUES = 'api/queues'
 API_VHOSTS = 'api/vhosts'
 
 NODE_STATS = [
@@ -31,7 +32,30 @@ OVERVIEW_STATS = [
     'message_stats.ack',
     'message_stats.redeliver',
     'message_stats.deliver',
-    'message_stats.publish'
+    'message_stats.publish',
+    'churn_rates.connection_created_details.rate',
+    'churn_rates.connection_closed_details.rate',
+    'churn_rates.channel_created_details.rate',
+    'churn_rates.channel_closed_details.rate',
+    'churn_rates.queue_created_details.rate',
+    'churn_rates.queue_declared_details.rate',
+    'churn_rates.queue_deleted_details.rate'
+]
+
+QUEUE_STATS = [
+    'messages',
+    'messages_paged_out',
+    'messages_persistent',
+    'messages_ready',
+    'messages_unacknowledged',
+    'message_stats.ack',
+    'message_stats.confirm',
+    'message_stats.deliver',
+    'message_stats.get',
+    'message_stats.get_no_ack',
+    'message_stats.publish',
+    'message_stats.redeliver',
+    'message_stats.return_unroutable',
 ]
 
 VHOST_MESSAGE_STATS = [
@@ -47,6 +71,9 @@ VHOST_MESSAGE_STATS = [
 
 ORDER = [
     'queued_messages',
+    'connection_churn_rates',
+    'channel_churn_rates',
+    'queue_churn_rates',
     'message_rates',
     'global_counts',
     'file_descriptors',
@@ -104,6 +131,28 @@ CHARTS = {
             ['object_totals_exchanges', 'exchanges', 'absolute']
         ]
     },
+    'connection_churn_rates': {
+        'options': [None, 'Connection Churn Rates', 'operations/s', 'overview', 'rabbitmq.connection_churn_rates', 'line'],
+        'lines': [
+            ['churn_rates_connection_created_details_rate', 'created', 'absolute'],
+            ['churn_rates_connection_closed_details_rate', 'closed', 'absolute']
+        ]
+    },
+    'channel_churn_rates': {
+        'options': [None, 'Channel Churn Rates', 'operations/s', 'overview', 'rabbitmq.channel_churn_rates', 'line'],
+        'lines': [
+            ['churn_rates_channel_created_details_rate', 'created', 'absolute'],
+            ['churn_rates_channel_closed_details_rate', 'closed', 'absolute']
+        ]
+    },
+    'queue_churn_rates': {
+        'options': [None, 'Queue Churn Rates', 'operations/s', 'overview', 'rabbitmq.queue_churn_rates', 'line'],
+        'lines': [
+            ['churn_rates_queue_created_details_rate', 'created', 'absolute'],
+            ['churn_rates_queue_declared_details_rate', 'declared', 'absolute'],
+            ['churn_rates_queue_deleted_details_rate', 'deleted', 'absolute']
+        ]
+    },
     'queued_messages': {
         'options': [None, 'Queued Messages', 'messages', 'overview', 'rabbitmq.queued_messages', 'stacked'],
         'lines': [
@@ -148,6 +197,44 @@ def vhost_chart_template(name):
     return order, charts
 
 
+def queue_chart_template(queue_id):
+    vhost, name = queue_id
+    order = [
+        'vhost_{0}_queue_{1}_queued_message'.format(vhost, name),
+        'vhost_{0}_queue_{1}_messages_stats'.format(vhost, name),
+    ]
+    family = 'vhost {0}'.format(vhost)
+
+    charts = {
+        order[0]: {
+            'options': [
+                None, 'Queue "{0}" in "{1}" queued messages'.format(name, vhost), 'messages', family, 'rabbitmq.queue_messages', 'line'],
+            'lines': [
+                ['vhost_{0}_queue_{1}_messages'.format(vhost, name), 'messages', 'absolute'],
+                ['vhost_{0}_queue_{1}_messages_paged_out'.format(vhost, name), 'paged_out', 'absolute'],
+                ['vhost_{0}_queue_{1}_messages_persistent'.format(vhost, name), 'persistent', 'absolute'],
+                ['vhost_{0}_queue_{1}_messages_ready'.format(vhost, name), 'ready', 'absolute'],
+                ['vhost_{0}_queue_{1}_messages_unacknowledged'.format(vhost, name), 'unack', 'absolute'],
+            ]
+        },
+        order[1]: {
+            'options': [
+                None, 'Queue "{0}" in "{1}" messages stats'.format(name, vhost), 'messages/s', family, 'rabbitmq.queue_messages_stats', 'line'],
+            'lines': [
+                ['vhost_{0}_queue_{1}_message_stats_ack'.format(vhost, name), 'ack', 'incremental'],
+                ['vhost_{0}_queue_{1}_message_stats_confirm'.format(vhost, name), 'confirm', 'incremental'],
+                ['vhost_{0}_queue_{1}_message_stats_deliver'.format(vhost, name), 'deliver', 'incremental'],
+                ['vhost_{0}_queue_{1}_message_stats_get'.format(vhost, name), 'get', 'incremental'],
+                ['vhost_{0}_queue_{1}_message_stats_get_no_ack'.format(vhost, name), 'get_no_ack', 'incremental'],
+                ['vhost_{0}_queue_{1}_message_stats_publish'.format(vhost, name), 'publish', 'incremental'],
+                ['vhost_{0}_queue_{1}_message_stats_redeliver'.format(vhost, name), 'redeliver', 'incremental'],
+                ['vhost_{0}_queue_{1}_message_stats_return_unroutable'.format(vhost, name), 'return_unroutable', 'incremental'],
+            ]
+        },
+    }
+
+    return order, charts
+
+
 class VhostStatsBuilder:
     def __init__(self):
@@ -167,6 +254,21 @@ class VhostStatsBuilder:
         stats = fetch_data(raw_data=self.stats, metrics=VHOST_MESSAGE_STATS)
         return dict(('vhost_{0}_{1}'.format(name, k), v) for k, v in stats.items())
 
+
+class QueueStatsBuilder:
+    def __init__(self):
+        self.stats = None
+
+    def set(self, raw_stats):
+        self.stats = raw_stats
+
+    def id(self):
+        return self.stats['vhost'], self.stats['name']
+
+    def queue_stats(self):
+        vhost, name = self.id()
+        stats = fetch_data(raw_data=self.stats, metrics=QUEUE_STATS)
+        return dict(('vhost_{0}_queue_{1}_{2}'.format(vhost, name, k), v) for k, v in stats.items())
 
 class Service(UrlService):
     def __init__(self, configuration=None, name=None):
@@ -181,6 +283,11 @@ class Service(UrlService):
         self.node_name = str()
         self.vhost = VhostStatsBuilder()
         self.collected_vhosts = set()
+        self.collect_queues_metrics = configuration.get('collect_queues_metrics', False)
+        self.debug("collect_queues_metrics is {0}".format("enabled" if self.collect_queues_metrics else "disabled"))
+        if self.collect_queues_metrics:
+            self.queue = QueueStatsBuilder()
+            self.collected_queues = set()
 
     def _get_data(self):
         data = dict()
@@ -201,6 +308,11 @@ class Service(UrlService):
             if stats:
                 data.update(stats)
 
+        if self.collect_queues_metrics:
+            stats = self.get_queues_stats()
+            if stats:
+                data.update(stats)
+
         return data or None
 
     def get_overview_stats(self):
@@ -260,6 +372,31 @@ class Service(UrlService):
         self.debug("number of vhosts: {0}, metrics: {1}".format(len(vhosts), len(data)))
         return data
 
+    def get_queues_stats(self):
+        url = '{0}/{1}'.format(self.url, API_QUEUES)
+        self.debug("doing http request to '{0}'".format(url))
+        raw = self._get_raw_data(url)
+        if not raw:
+            return None
+
+        data = dict()
+        queues = loads(raw)
+        charts_initialized = len(self.charts) > 0
+
+        for queue in queues:
+            self.queue.set(queue)
+            if self.queue.id()[0] not in self.collected_vhosts:
+                continue
+
+            if charts_initialized and self.queue.id() not in self.collected_queues:
+                self.collected_queues.add(self.queue.id())
+                self.add_queue_charts(self.queue.id())
+
+            data.update(self.queue.queue_stats())
+
+        self.debug("number of queues: {0}, metrics: {1}".format(len(queues), len(data)))
+        return data
+
     def add_vhost_charts(self, vhost_name):
         order, charts = vhost_chart_template(vhost_name)
 
@@ -271,6 +408,17 @@ class Service(UrlService):
             for dimension in dimensions:
                 new_chart.add_dimension(dimension)
 
+    def add_queue_charts(self, queue_id):
+        order, charts = queue_chart_template(queue_id)
+
+        for chart_name in order:
+            params = [chart_name] + charts[chart_name]['options']
+            dimensions = charts[chart_name]['lines']
+
+            new_chart = self.charts.add_chart(params)
+            for dimension in dimensions:
+                new_chart.add_dimension(dimension)
+
 
 def fetch_data(raw_data, metrics):
     data = dict()
@@ -291,5 +439,5 @@ def handle_disabled_disk_monitoring(node_stats):
     # https://github.com/netdata/netdata/issues/7218
     # can be "disk_free": "disk_free_monitoring_disabled"
     v = node_stats.get('disk_free')
-    if v and isinstance(v, str):
+    if v and not isinstance(v, int):
         del node_stats['disk_free']
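When `collect_queues_metrics` is enabled, the new `queue_chart_template()` and `QueueStatsBuilder.queue_stats()` key every per-queue metric by vhost and queue name. A small illustration of that naming scheme, using a made-up vhost and queue (this helper is not part of the module):

```python
# Illustration only: rebuild the per-queue dimension ids the new code produces,
# where each QUEUE_STATS entry is prefixed with the vhost and queue name and
# dots are flattened to underscores (compare the dimensions in queue_chart_template()).
QUEUE_STATS = ['messages', 'messages_ready', 'message_stats.ack', 'message_stats.publish']


def queue_dimension_ids(vhost, name):
    return ['vhost_{0}_queue_{1}_{2}'.format(vhost, name, metric.replace('.', '_'))
            for metric in QUEUE_STATS]


# hypothetical vhost "/" with a queue named "orders"
print(queue_dimension_ids('/', 'orders'))
# ['vhost_/_queue_orders_messages', 'vhost_/_queue_orders_messages_ready',
#  'vhost_/_queue_orders_message_stats_ack', 'vhost_/_queue_orders_message_stats_publish']
```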
diff --git a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
index ae0dbdb75..47d47a1bf 100644
--- a/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
+++ b/collectors/python.d.plugin/rabbitmq/rabbitmq.conf
@@ -70,6 +70,12 @@
 # user: 'username'
 # pass: 'password'
 #
+# The RabbitMQ plugin can also collect per-queue stats for each vhost, which is
+# disabled by default. Please note that enabling this can induce serious overhead
+# on both Netdata and RabbitMQ if a lot of queues are configured and used.
+#
+# collect_queues_metrics: 'yes/no'
+#
 # ----------------------------------------------------------------------
 # AUTO-DETECTION JOBS
 # only one of them will run (they have the same name)
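For reference, a job that enables the new per-queue collection might look like the sketch below; the job name, host, port and credentials are placeholders and must match your RabbitMQ management listener:

```yaml
# Sketch of a python.d/rabbitmq.conf job with per-queue metrics enabled (placeholder values).
# Leaving "yes" unquoted lets YAML parse it as a boolean, which the module reads with
# configuration.get('collect_queues_metrics', False).
local:
  host: '127.0.0.1'
  port: 15672
  user: 'guest'
  pass: 'guest'
  collect_queues_metrics: yes
```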