Diffstat (limited to 'collectors/python.d.plugin')
-rw-r--r--  collectors/python.d.plugin/Makefile.am | 4
-rw-r--r--  collectors/python.d.plugin/README.md | 12
-rw-r--r--  collectors/python.d.plugin/adaptec_raid/README.md | 11
-rw-r--r--  collectors/python.d.plugin/alarms/README.md | 9
-rw-r--r--  collectors/python.d.plugin/am2320/README.md | 7
-rw-r--r--  collectors/python.d.plugin/anomalies/README.md | 16
-rw-r--r--  collectors/python.d.plugin/beanstalk/README.md | 7
-rw-r--r--  collectors/python.d.plugin/bind_rndc/README.md | 7
-rw-r--r--  collectors/python.d.plugin/boinc/README.md | 7
-rw-r--r--  collectors/python.d.plugin/ceph/README.md | 7
-rw-r--r--  collectors/python.d.plugin/changefinder/README.md | 8
-rw-r--r--  collectors/python.d.plugin/dockerd/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/dockerd/README.md | 46
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.chart.py | 86
-rw-r--r--  collectors/python.d.plugin/dockerd/dockerd.conf | 77
-rw-r--r--  collectors/python.d.plugin/dovecot/README.md | 7
-rw-r--r--  collectors/python.d.plugin/example/README.md | 10
-rw-r--r--  collectors/python.d.plugin/exim/README.md | 5
-rw-r--r--  collectors/python.d.plugin/fail2ban/README.md | 7
-rw-r--r--  collectors/python.d.plugin/gearman/README.md | 7
-rw-r--r--  collectors/python.d.plugin/go_expvar/README.md | 15
-rw-r--r--  collectors/python.d.plugin/haproxy/README.md | 9
-rw-r--r--  collectors/python.d.plugin/hddtemp/README.md | 7
-rw-r--r--  collectors/python.d.plugin/hpssa/README.md | 13
-rw-r--r--  collectors/python.d.plugin/icecast/README.md | 7
-rw-r--r--  collectors/python.d.plugin/ipfs/README.md | 7
-rw-r--r--  collectors/python.d.plugin/litespeed/README.md | 7
-rw-r--r--  collectors/python.d.plugin/logind/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/logind/README.md | 86
-rw-r--r--  collectors/python.d.plugin/logind/logind.chart.py | 85
-rw-r--r--  collectors/python.d.plugin/logind/logind.conf | 60
-rw-r--r--  collectors/python.d.plugin/megacli/README.md | 11
-rw-r--r--  collectors/python.d.plugin/memcached/README.md | 7
-rw-r--r--  collectors/python.d.plugin/mongodb/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/mongodb/README.md | 210
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.chart.py | 786
-rw-r--r--  collectors/python.d.plugin/mongodb/mongodb.conf | 102
-rw-r--r--  collectors/python.d.plugin/monit/README.md | 44
-rw-r--r--  collectors/python.d.plugin/nsd/README.md | 5
-rw-r--r--  collectors/python.d.plugin/ntpd/README.md | 90
-rw-r--r--  collectors/python.d.plugin/ntpd/ntpd.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/README.md | 11
-rw-r--r--  collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py | 53
-rw-r--r--  collectors/python.d.plugin/openldap/README.md | 7
-rw-r--r--  collectors/python.d.plugin/oracledb/README.md | 7
-rw-r--r--  collectors/python.d.plugin/postfix/README.md | 5
-rw-r--r--  collectors/python.d.plugin/proxysql/README.md | 108
-rw-r--r--  collectors/python.d.plugin/proxysql/proxysql.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/puppet/README.md | 7
-rw-r--r--  collectors/python.d.plugin/python.d.conf | 3
-rw-r--r--  collectors/python.d.plugin/rabbitmq/README.md | 9
-rw-r--r--  collectors/python.d.plugin/rethinkdbs/README.md | 38
-rw-r--r--  collectors/python.d.plugin/retroshare/README.md | 7
-rw-r--r--  collectors/python.d.plugin/riakkv/README.md | 7
-rw-r--r--  collectors/python.d.plugin/samba/README.md | 11
-rw-r--r--  collectors/python.d.plugin/sensors/README.md | 11
-rw-r--r--  collectors/python.d.plugin/smartd_log/README.md | 7
-rw-r--r--  collectors/python.d.plugin/spigotmc/README.md | 7
-rw-r--r--  collectors/python.d.plugin/springboot/Makefile.inc | 13
-rw-r--r--  collectors/python.d.plugin/springboot/README.md | 145
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.chart.py | 160
-rw-r--r--  collectors/python.d.plugin/springboot/springboot.conf | 118
-rw-r--r--  collectors/python.d.plugin/squid/README.md | 7
-rw-r--r--  collectors/python.d.plugin/tomcat/README.md | 7
-rw-r--r--  collectors/python.d.plugin/tor/README.md | 7
-rw-r--r--  collectors/python.d.plugin/traefik/README.md | 62
-rw-r--r--  collectors/python.d.plugin/uwsgi/README.md | 7
-rw-r--r--  collectors/python.d.plugin/varnish/README.md | 7
-rw-r--r--  collectors/python.d.plugin/w1sensor/README.md | 7
-rw-r--r--  collectors/python.d.plugin/w1sensor/w1sensor.chart.py | 2
-rw-r--r--  collectors/python.d.plugin/zscores/README.md | 8
71 files changed, 400 insertions(+), 2382 deletions(-)
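
The summary above can be regenerated locally with `git diff --stat`, restricted to this directory. A minimal sketch, assuming you substitute the two refs (tags or commit hashes) this comparison was taken between; the refs below are placeholders, not values taken from the diff itself:

```bash
# Replace <old-ref> and <new-ref> with the refs being compared; limiting the
# diff to collectors/python.d.plugin matches the "Diffstat (limited to ...)"
# view above.
git diff --stat <old-ref> <new-ref> -- collectors/python.d.plugin
```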
diff --git a/collectors/python.d.plugin/Makefile.am b/collectors/python.d.plugin/Makefile.am
index 1bbbf8ca0..6ea7b21b5 100644
--- a/collectors/python.d.plugin/Makefile.am
+++ b/collectors/python.d.plugin/Makefile.am
@@ -48,7 +48,6 @@ include bind_rndc/Makefile.inc
include boinc/Makefile.inc
include ceph/Makefile.inc
include changefinder/Makefile.inc
-include dockerd/Makefile.inc
include dovecot/Makefile.inc
include example/Makefile.inc
include exim/Makefile.inc
@@ -61,10 +60,8 @@ include hpssa/Makefile.inc
include icecast/Makefile.inc
include ipfs/Makefile.inc
include litespeed/Makefile.inc
-include logind/Makefile.inc
include megacli/Makefile.inc
include memcached/Makefile.inc
-include mongodb/Makefile.inc
include monit/Makefile.inc
include nvidia_smi/Makefile.inc
include nsd/Makefile.inc
@@ -83,7 +80,6 @@ include samba/Makefile.inc
include sensors/Makefile.inc
include smartd_log/Makefile.inc
include spigotmc/Makefile.inc
-include springboot/Makefile.inc
include squid/Makefile.inc
include tomcat/Makefile.inc
include tor/Makefile.inc
diff --git a/collectors/python.d.plugin/README.md b/collectors/python.d.plugin/README.md
index 2f5ebfcb1..b6d658fae 100644
--- a/collectors/python.d.plugin/README.md
+++ b/collectors/python.d.plugin/README.md
@@ -1,6 +1,10 @@
<!--
title: "python.d.plugin"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/README.md"
+sidebar_label: "python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/Collectors"
-->
# python.d.plugin
@@ -86,7 +90,7 @@ plugin](https://raw.githubusercontent.com/netdata/netdata/master/collectors/pyth
Netdata (as opposed to having to install Netdata from source again with your new changes) you can copy over the relevant
file to where Netdata expects it and then either `sudo systemctl restart netdata` to have it be picked up and used by
Netdata or you can just run the updated collector in debug mode by following a process like below (this assumes you have
-[installed Netdata from a GitHub fork](https://learn.netdata.cloud/docs/agent/packaging/installer/methods/manual) you
+[installed Netdata from a GitHub fork](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/manual.md) you
have made to do your development on).
```bash
@@ -125,7 +129,7 @@ CHART = {
]}
```
-All names are better explained in the [External Plugins](/collectors/plugins.d/README.md) section.
+All names are better explained in the [External Plugins](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md) section.
Parameters like `priority` and `update_every` are handled by `python.d.plugin`.
### `Service` class
@@ -227,7 +231,7 @@ For additional security it uses python `subprocess.Popen` (without `shell=True`
_Examples: `apache`, `nginx`, `tomcat`_
-_Multiple Endpoints (urls) Examples: [`rabbitmq`](/collectors/python.d.plugin/rabbitmq/README.md) (simpler).
+_Multiple Endpoints (urls) Examples: [`rabbitmq`](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/rabbitmq/README.md) (simpler).
_Variables from config file_: `url`, `user`, `pass`.
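
The README hunk above mentions running an updated collector in debug mode "by following a process like below", but the corresponding code block falls outside the hunk. As a rough sketch of that procedure (the plugin path shown is the usual install location and may differ on your system, and `example` stands in for whichever module you are working on):

```bash
# Become the netdata user, then run a single python.d module in the foreground
# with debug and trace output. Adjust the path if your install differs.
sudo su -s /bin/bash netdata
/usr/libexec/netdata/plugins.d/python.d.plugin example debug trace
```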
diff --git a/collectors/python.d.plugin/adaptec_raid/README.md b/collectors/python.d.plugin/adaptec_raid/README.md
index da5d13b16..90ef8fa3c 100644
--- a/collectors/python.d.plugin/adaptec_raid/README.md
+++ b/collectors/python.d.plugin/adaptec_raid/README.md
@@ -1,7 +1,10 @@
<!--
title: "Adaptec RAID controller monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/adaptec_raid/README.md"
sidebar_label: "Adaptec RAID"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Hardware"
-->
# Adaptec RAID controller monitoring with Netdata
@@ -52,7 +55,7 @@ systemctl restart netdata.service
## Enable the collector
The `adaptec_raid` collector is disabled by default. To enable it, use `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
file.
```bash
@@ -61,12 +64,12 @@ sudo ./edit-config python.d.conf
```
Change the value of the `adaptec_raid` setting to `yes`. Save the file and restart the Netdata Agent with `sudo
-systemctl restart netdata`, or the [appropriate method](/docs/configure/start-stop-restart.md) for your system.
+systemctl restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
## Configuration
Edit the `python.d/adaptec_raid.conf` configuration file using `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/alarms/README.md b/collectors/python.d.plugin/alarms/README.md
index 8dc666f5b..4804bd0d7 100644
--- a/collectors/python.d.plugin/alarms/README.md
+++ b/collectors/python.d.plugin/alarms/README.md
@@ -1,6 +1,9 @@
<!--
title: "Alarms"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/alarms/README.md"
+sidebar_label: "alarms"
+learn_status: "Unpublished"
+learn_topic_type: "References"
-->
# Alarms - graphing Netdata alarm states over time
@@ -23,7 +26,7 @@ Below is an example of the chart produced when running `stress-ng --all 2` for a
## Configuration
-Enable the collector and [restart Netdata](/docs/configure/start-stop-restart.md).
+Enable the collector and [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
```bash
cd /etc/netdata/
@@ -33,7 +36,7 @@ sudo systemctl restart netdata
```
If needed, edit the `python.d/alarms.conf` configuration file using `edit-config` from your agent's [config
-directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is usually at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/am2320/README.md b/collectors/python.d.plugin/am2320/README.md
index 3503d7c17..070e8eb38 100644
--- a/collectors/python.d.plugin/am2320/README.md
+++ b/collectors/python.d.plugin/am2320/README.md
@@ -1,7 +1,10 @@
<!--
title: "AM2320 sensor monitoring with netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/am2320/README.md"
sidebar_label: "AM2320"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Remotes/Devices"
-->
# AM2320 sensor monitoring with netdata
@@ -21,7 +24,7 @@ It produces the following charts:
## Configuration
Edit the `python.d/am2320.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/anomalies/README.md b/collectors/python.d.plugin/anomalies/README.md
index aaf39ab92..7c59275f9 100644
--- a/collectors/python.d.plugin/anomalies/README.md
+++ b/collectors/python.d.plugin/anomalies/README.md
@@ -1,13 +1,17 @@
<!--
title: "Anomaly detection with Netdata"
description: "Use ML-driven anomaly detection to narrow your focus to only affected metrics and services/processes on your node to shorten root cause analysis."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/anomalies/README.md
-sidebar_url: Anomalies
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/anomalies/README.md"
+sidebar_url: "Anomalies"
+sidebar_label: "anomalies"
+learn_status: "Unpublished"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Misc"
-->
# Anomaly detection with Netdata
-**Note**: Check out the [Netdata Anomaly Advisor](https://learn.netdata.cloud/docs/cloud/insights/anomaly-advisor) for a more native anomaly detection experience within Netdata.
+**Note**: Check out the [Netdata Anomaly Advisor](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/anomaly-advisor.mdx) for a more native anomaly detection experience within Netdata.
This collector uses the Python [PyOD](https://pyod.readthedocs.io/en/latest/index.html) library to perform unsupervised [anomaly detection](https://en.wikipedia.org/wiki/Anomaly_detection) on your Netdata charts and/or dimensions.
@@ -70,7 +74,7 @@ The configuration for the anomalies collector defines how it will behave on your
_**Note**: If you are unsure about any of the below configuration options then it's best to just ignore all this and leave the `anomalies.conf` file alone to begin with. Then you can return to it later if you would like to tune things a bit more once the collector is running for a while and you have a feeling for its performance on your node._
Edit the `python.d/anomalies.conf` configuration file using `edit-config` from your agent's [config
-directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is usually at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -216,7 +220,7 @@ If you would like to go deeper on what exactly the anomalies collector is doing
## Notes
-- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://learn.netdata.cloud/docs/agent/web/api) to get the required data for each chart.
+- Python 3 is required as the [`netdata-pandas`](https://github.com/netdata/netdata-pandas) package uses Python async libraries ([asks](https://pypi.org/project/asks/) and [trio](https://pypi.org/project/trio/)) to make asynchronous calls to the [Netdata REST API](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the required data for each chart.
- Python 3 is also required for the underlying ML libraries of [numba](https://pypi.org/project/numba/), [scikit-learn](https://pypi.org/project/scikit-learn/), and [PyOD](https://pypi.org/project/pyod/).
- It may take a few hours or so (depending on your choice of `train_secs_n`) for the collector to 'settle' into its typical behaviour in terms of the trained models and probabilities you will see in the normal running of your node.
- As this collector does most of the work in Python itself, with [PyOD](https://pyod.readthedocs.io/en/latest/) leveraging [numba](https://numba.pydata.org/) under the hood, you may want to try it out first on a test or development system to get a sense of its performance characteristics on a node similar to where you would like to use it.
@@ -231,7 +235,7 @@ If you would like to go deeper on what exactly the anomalies collector is doing
- If you activate this collector on a fresh node, it might take a little while to build up enough data to calculate a realistic and useful model.
- Some models like `iforest` can be comparatively expensive (on the same n1-standard-2 system above, ~2s runtime during predict, ~40s training time, ~50% cpu on both train and predict), so if you would like to use it you might be advised to set a relatively high `update_every`, maybe 10, 15 or 30, in `anomalies.conf`.
- Setting a higher `train_every_n` and `update_every` is an easy way to devote fewer resources on the node to anomaly detection. Specifying fewer charts and a lower `train_n_secs` will also help reduce resources, at the expense of covering fewer charts and maybe a noisier model if you set `train_n_secs` too small for how your node tends to behave.
-- If you would like to enable this on a Rasberry Pi, then check out [this guide](https://learn.netdata.cloud/guides/monitor/raspberry-pi-anomaly-detection) which will guide you through first installing LLVM.
+- If you would like to enable this on a Raspberry Pi, then check out [this guide](https://github.com/netdata/netdata/blob/master/docs/guides/monitor/raspberry-pi-anomaly-detection.md) which will guide you through first installing LLVM.
## Useful links and further reading
diff --git a/collectors/python.d.plugin/beanstalk/README.md b/collectors/python.d.plugin/beanstalk/README.md
index 3b632597e..7e7f30de9 100644
--- a/collectors/python.d.plugin/beanstalk/README.md
+++ b/collectors/python.d.plugin/beanstalk/README.md
@@ -1,7 +1,10 @@
<!--
title: "Beanstalk monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/beanstalk/README.md"
sidebar_label: "Beanstalk"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Message brokers"
-->
# Beanstalk monitoring with Netdata
@@ -112,7 +115,7 @@ Provides server and tube-level statistics.
## Configuration
Edit the `python.d/beanstalk.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/bind_rndc/README.md b/collectors/python.d.plugin/bind_rndc/README.md
index 2d747f81b..e87001884 100644
--- a/collectors/python.d.plugin/bind_rndc/README.md
+++ b/collectors/python.d.plugin/bind_rndc/README.md
@@ -1,7 +1,10 @@
<!--
title: "ISC Bind monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/bind_rndc/README.md"
sidebar_label: "ISC Bind"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# ISC Bind monitoring with Netdata
@@ -58,7 +61,7 @@ It produces:
## Configuration
Edit the `python.d/bind_rndc.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/boinc/README.md b/collectors/python.d.plugin/boinc/README.md
index 4da2d52bf..149d37ca1 100644
--- a/collectors/python.d.plugin/boinc/README.md
+++ b/collectors/python.d.plugin/boinc/README.md
@@ -1,7 +1,10 @@
<!--
title: "BOINC monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/boinc/README.md"
sidebar_label: "BOINC"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Distributed computing"
-->
# BOINC monitoring with Netdata
@@ -13,7 +16,7 @@ It provides charts tracking the total number of tasks and active tasks, as well
## Configuration
Edit the `python.d/boinc.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/ceph/README.md b/collectors/python.d.plugin/ceph/README.md
index b75ba6d4f..e7d0f51e2 100644
--- a/collectors/python.d.plugin/ceph/README.md
+++ b/collectors/python.d.plugin/ceph/README.md
@@ -1,7 +1,10 @@
<!--
title: "CEPH monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ceph/README.md"
sidebar_label: "CEPH"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Storage"
-->
# CEPH monitoring with Netdata
@@ -28,7 +31,7 @@ Monitors the ceph cluster usage and consumption data of a server, and produces:
## Configuration
Edit the `python.d/ceph.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/changefinder/README.md b/collectors/python.d.plugin/changefinder/README.md
index 7ec3a2539..326a69dd5 100644
--- a/collectors/python.d.plugin/changefinder/README.md
+++ b/collectors/python.d.plugin/changefinder/README.md
@@ -1,7 +1,11 @@
<!--
title: "Online change point detection with Netdata"
description: "Use ML-driven change point detection to narrow your focus and shorten root cause analysis."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/changefinder/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/changefinder/README.md"
+sidebar_label: "changefinder"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/QoS"
-->
# Online changepoint detection with Netdata
@@ -93,7 +97,7 @@ leave the `changefinder.conf` file alone to begin with. Then you can return to i
a bit more once the collector is running for a while and you have a feeling for its performance on your node._
Edit the `python.d/changefinder.conf` configuration file using `edit-config` from your
-agent's [config directory](/docs/configure/nodes.md), which is usually at `/etc/netdata`.
+agent's [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is usually at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/dockerd/Makefile.inc b/collectors/python.d.plugin/dockerd/Makefile.inc
deleted file mode 100644
index b100bc6a1..000000000
--- a/collectors/python.d.plugin/dockerd/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += dockerd/dockerd.chart.py
-dist_pythonconfig_DATA += dockerd/dockerd.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += dockerd/README.md dockerd/Makefile.inc
-
diff --git a/collectors/python.d.plugin/dockerd/README.md b/collectors/python.d.plugin/dockerd/README.md
deleted file mode 100644
index 6470a7c0b..000000000
--- a/collectors/python.d.plugin/dockerd/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-<!--
-title: "Docker Engine monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dockerd/README.md
-sidebar_label: "Docker Engine"
--->
-
-# Docker Engine monitoring with Netdata
-
-Collects docker container health metrics.
-
-**Requirement:**
-
-- `docker` package, required version 3.2.0+
-
-Following charts are drawn:
-
-1. **running containers**
-
- - count
-
-2. **healthy containers**
-
- - count
-
-3. **unhealthy containers**
-
- - count
-
-## Configuration
-
-Edit the `python.d/dockerd.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/dockerd.conf
-```
-
-```yaml
- update_every : 1
- priority : 60000
-```
-
----
-
-
diff --git a/collectors/python.d.plugin/dockerd/dockerd.chart.py b/collectors/python.d.plugin/dockerd/dockerd.chart.py
deleted file mode 100644
index bd9640bbf..000000000
--- a/collectors/python.d.plugin/dockerd/dockerd.chart.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: docker netdata python.d module
-# Author: Kévin Darcel (@tuxity)
-
-try:
- import docker
-
- HAS_DOCKER = True
-except ImportError:
- HAS_DOCKER = False
-
-from distutils.version import StrictVersion
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'running_containers',
- 'healthy_containers',
- 'unhealthy_containers'
-]
-
-CHARTS = {
- 'running_containers': {
- 'options': [None, 'Number of running containers', 'containers', 'running containers',
- 'docker.running_containers', 'line'],
- 'lines': [
- ['running_containers', 'running']
- ]
- },
- 'healthy_containers': {
- 'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
- 'docker.healthy_containers', 'line'],
- 'lines': [
- ['healthy_containers', 'healthy']
- ]
- },
- 'unhealthy_containers': {
- 'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
- 'docker.unhealthy_containers', 'line'],
- 'lines': [
- ['unhealthy_containers', 'unhealthy']
- ]
- }
-}
-
-MIN_REQUIRED_VERSION = '3.2.0'
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.client = None
-
- def check(self):
- if not HAS_DOCKER:
- self.error("'docker' package is needed to use dockerd module")
- return False
-
- if StrictVersion(docker.__version__) < StrictVersion(MIN_REQUIRED_VERSION):
- self.error("installed 'docker' package version {0}, minimum required version {1}, please upgrade".format(
- docker.__version__,
- MIN_REQUIRED_VERSION,
- ))
- return False
-
- self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
-
- try:
- self.client.ping()
- except docker.errors.APIError as error:
- self.error(error)
- return False
-
- return True
-
- def get_data(self):
- data = dict()
-
- data['running_containers'] = len(self.client.containers.list(sparse=True))
- data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
- data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))
-
- return data or None
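
The deleted `check()`/`get_data()` methods above talk to the Docker daemon over its local socket and count containers using the `health` filter. A rough equivalent with the `docker` CLI rather than the Python SDK the module used, shown only as a sketch and assuming the CLI is installed and the daemon listens on the default socket:

```bash
# Mirror the three dimensions the removed chart module collected.
docker ps -q | wc -l                              # running containers
docker ps -q --filter "health=healthy" | wc -l    # healthy containers
docker ps -q --filter "health=unhealthy" | wc -l  # unhealthy containers
```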
diff --git a/collectors/python.d.plugin/dockerd/dockerd.conf b/collectors/python.d.plugin/dockerd/dockerd.conf
deleted file mode 100644
index 96c8ee0d8..000000000
--- a/collectors/python.d.plugin/dockerd/dockerd.conf
+++ /dev/null
@@ -1,77 +0,0 @@
-# netdata python.d.plugin configuration for dockerd health data API
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, dockerd plugin also supports the following:
-#
-# url: '<scheme>://<host>:<port>/<health_page_api>'
-# # http://localhost:8080/health
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-local:
- url: 'unix://var/run/docker.sock'
diff --git a/collectors/python.d.plugin/dovecot/README.md b/collectors/python.d.plugin/dovecot/README.md
index e6bbf0d74..358f1ba81 100644
--- a/collectors/python.d.plugin/dovecot/README.md
+++ b/collectors/python.d.plugin/dovecot/README.md
@@ -1,7 +1,10 @@
<!--
title: "Dovecot monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/dovecot/README.md"
sidebar_label: "Dovecot"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# Dovecot monitoring with Netdata
@@ -78,7 +81,7 @@ Module gives information with following charts:
## Configuration
Edit the `python.d/dovecot.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/example/README.md b/collectors/python.d.plugin/example/README.md
index 0b80aa9ea..7e6d2b913 100644
--- a/collectors/python.d.plugin/example/README.md
+++ b/collectors/python.d.plugin/example/README.md
@@ -1,6 +1,10 @@
<!--
-title: "Example"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/README.md
+title: "Example module in Python"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/example/README.md"
+sidebar_label: "Example module in Python"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Mock Collectors"
-->
# Example
@@ -9,6 +13,6 @@ You can add custom data collectors using Python.
Netdata provides an [example python data collection module](https://github.com/netdata/netdata/tree/master/collectors/python.d.plugin/example).
-If you want to write your own collector, read our [writing a new Python module](/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
+If you want to write your own collector, read our [writing a new Python module](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
diff --git a/collectors/python.d.plugin/exim/README.md b/collectors/python.d.plugin/exim/README.md
index 92b2d7a5b..a9c66c057 100644
--- a/collectors/python.d.plugin/exim/README.md
+++ b/collectors/python.d.plugin/exim/README.md
@@ -1,7 +1,10 @@
<!--
title: "Exim monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/exim/README.md"
sidebar_label: "Exim"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# Exim monitoring with Netdata
diff --git a/collectors/python.d.plugin/fail2ban/README.md b/collectors/python.d.plugin/fail2ban/README.md
index be09e1857..6b2c6bba1 100644
--- a/collectors/python.d.plugin/fail2ban/README.md
+++ b/collectors/python.d.plugin/fail2ban/README.md
@@ -1,7 +1,10 @@
<!--
title: "Fail2ban monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/fail2ban/README.md"
sidebar_label: "Fail2ban"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Apps"
-->
# Fail2ban monitoring with Netdata
@@ -58,7 +61,7 @@ To persist the changes after rotating the log file, add `create 640 root netdata
## Configuration
Edit the `python.d/fail2ban.conf` configuration file using `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/gearman/README.md b/collectors/python.d.plugin/gearman/README.md
index 34ea584ab..9ac53cb8e 100644
--- a/collectors/python.d.plugin/gearman/README.md
+++ b/collectors/python.d.plugin/gearman/README.md
@@ -1,7 +1,10 @@
<!--
title: "Gearman monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/gearman/README.md"
sidebar_label: "Gearman"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Distributed computing"
-->
# Gearman monitoring with Netdata
@@ -27,7 +30,7 @@ It produces:
## Configuration
Edit the `python.d/gearman.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/go_expvar/README.md b/collectors/python.d.plugin/go_expvar/README.md
index feb150dd9..ff786e7c4 100644
--- a/collectors/python.d.plugin/go_expvar/README.md
+++ b/collectors/python.d.plugin/go_expvar/README.md
@@ -1,7 +1,10 @@
<!--
title: "Go applications monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/go_expvar/README.md"
sidebar_label: "Go applications"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Application Performance Monitoring"
-->
# Go applications monitoring with Netdata
@@ -209,8 +212,8 @@ See [this issue](https://github.com/netdata/netdata/pull/1902#issuecomment-28449
Please see these two links to the official Netdata documentation for more information about the values:
-- [External plugins - charts](/collectors/plugins.d/README.md#chart)
-- [Chart variables](/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
+- [External plugins - charts](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#chart)
+- [Chart variables](https://github.com/netdata/netdata/blob/master/collectors/python.d.plugin/README.md#global-variables-order-and-chart)
**Line definitions**
@@ -233,7 +236,7 @@ hidden: False
```
Please see the following link for more information about the options and their default values:
-[External plugins - dimensions](/collectors/plugins.d/README.md#dimension)
+[External plugins - dimensions](https://github.com/netdata/netdata/blob/master/collectors/plugins.d/README.md#dimension)
Apart from top-level expvars, this plugin can also parse expvars stored in a multi-level map;
All dicts in the resulting JSON document are then flattened to one level.
@@ -255,7 +258,7 @@ the first defined key wins and all subsequent keys with the same name are ignore
## Enable the collector
The `go_expvar` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -268,7 +271,7 @@ restart netdata`, or the appropriate method for your system, to finish enabling
## Configuration
Edit the `python.d/go_expvar.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/haproxy/README.md b/collectors/python.d.plugin/haproxy/README.md
index f16e7258e..1aa1a214a 100644
--- a/collectors/python.d.plugin/haproxy/README.md
+++ b/collectors/python.d.plugin/haproxy/README.md
@@ -1,7 +1,10 @@
<!--
title: "HAProxy monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md
-sidebar_label: "HAProxy"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/haproxy/README.md"
+sidebar_label: "haproxy-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# HAProxy monitoring with Netdata
@@ -39,7 +42,7 @@ It produces:
## Configuration
Edit the `python.d/haproxy.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/hddtemp/README.md b/collectors/python.d.plugin/hddtemp/README.md
index d8aba62d2..6a253b5bf 100644
--- a/collectors/python.d.plugin/hddtemp/README.md
+++ b/collectors/python.d.plugin/hddtemp/README.md
@@ -1,7 +1,10 @@
<!--
title: "Hard drive temperature monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hddtemp/README.md"
sidebar_label: "Hard drive temperature"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Hardware"
-->
# Hard drive temperature monitoring with Netdata
@@ -16,7 +19,7 @@ It produces one chart **Temperature** with dynamic number of dimensions (one per
## Configuration
Edit the `python.d/hddtemp.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/hpssa/README.md b/collectors/python.d.plugin/hpssa/README.md
index c1d218279..72dc78032 100644
--- a/collectors/python.d.plugin/hpssa/README.md
+++ b/collectors/python.d.plugin/hpssa/README.md
@@ -1,7 +1,10 @@
<!--
title: "HP Smart Storage Arrays monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/hpssa/README.md"
sidebar_label: "HP Smart Storage Arrays"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Storage"
-->
# HP Smart Storage Arrays monitoring with Netdata
@@ -51,7 +54,7 @@ systemctl restart netdata.service
## Enable the collector
The `hpssa` collector is disabled by default. To enable it, use `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
file.
```bash
@@ -60,12 +63,12 @@ sudo ./edit-config python.d.conf
```
Change the value of the `hpssa` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
-restart netdata`, or the [appropriate method](/docs/configure/start-stop-restart.md) for your system.
+restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
## Configuration
Edit the `python.d/hpssa.conf` configuration file using `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -79,5 +82,5 @@ ssacli_path: /usr/sbin/ssacli
```
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
diff --git a/collectors/python.d.plugin/icecast/README.md b/collectors/python.d.plugin/icecast/README.md
index c122f76a3..6fca34ba6 100644
--- a/collectors/python.d.plugin/icecast/README.md
+++ b/collectors/python.d.plugin/icecast/README.md
@@ -1,7 +1,10 @@
<!--
title: "Icecast monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/icecast/README.md"
sidebar_label: "Icecast"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Networking"
-->
# Icecast monitoring with Netdata
@@ -21,7 +24,7 @@ It produces the following charts:
## Configuration
Edit the `python.d/icecast.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/ipfs/README.md b/collectors/python.d.plugin/ipfs/README.md
index 3a7c43632..8f5e53b10 100644
--- a/collectors/python.d.plugin/ipfs/README.md
+++ b/collectors/python.d.plugin/ipfs/README.md
@@ -1,7 +1,10 @@
<!--
title: "IPFS monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ipfs/README.md"
sidebar_label: "IPFS"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Storage"
-->
# IPFS monitoring with Netdata
@@ -20,7 +23,7 @@ It produces the following charts:
## Configuration
Edit the `python.d/ipfs.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/litespeed/README.md b/collectors/python.d.plugin/litespeed/README.md
index b58b23d7e..b9bad4635 100644
--- a/collectors/python.d.plugin/litespeed/README.md
+++ b/collectors/python.d.plugin/litespeed/README.md
@@ -1,7 +1,10 @@
<!--
title: "LiteSpeed monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/litespeed/README.md"
sidebar_label: "LiteSpeed"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Application Performance Monitoring"
-->
# LiteSpeed monitoring with Netdata
@@ -53,7 +56,7 @@ It produces:
## Configuration
Edit the `python.d/litespeed.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/logind/Makefile.inc b/collectors/python.d.plugin/logind/Makefile.inc
deleted file mode 100644
index adadab120..000000000
--- a/collectors/python.d.plugin/logind/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += logind/logind.chart.py
-dist_pythonconfig_DATA += logind/logind.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += logind/README.md logind/Makefile.inc
-
diff --git a/collectors/python.d.plugin/logind/README.md b/collectors/python.d.plugin/logind/README.md
deleted file mode 100644
index 442d388d0..000000000
--- a/collectors/python.d.plugin/logind/README.md
+++ /dev/null
@@ -1,86 +0,0 @@
-<!--
-title: "systemd-logind monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/logind/README.md
-sidebar_label: "systemd-logind"
--->
-
-# Systemd-Logind monitoring with Netdata
-
-Monitors active sessions, users, and seats tracked by `systemd-logind` or `elogind`.
-
-It provides the following charts:
-
-1. **Sessions** Tracks the total number of sessions.
-
- - Graphical: Local graphical sessions (running X11, or Wayland, or something else).
- - Console: Local console sessions.
- - Remote: Remote sessions.
-
-2. **Users** Tracks total number of unique user logins of each type.
-
- - Graphical
- - Console
- - Remote
-
-3. **Seats** Total number of seats in use.
-
- - Seats
-
-## Enable the collector
-
-The `logind` collector is disabled by default. To enable it, use `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf` file.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d.conf
-```
-
-Change the value of the `logind` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
-restart netdata`, or the appropriate method for your system, to finish enabling the `logind` collector.
-
-## Configuration
-
-This module needs no configuration. Just make sure the `netdata` user
-can run the `loginctl` command and get a session list without having to
-specify a path.
-
-This will work with any command that can output data in the _exact_
-same format as `loginctl list-sessions --no-legend`. If you have some
-other command you want to use that outputs data in this format, you can
-specify it using the `command` key like so:
-
-```yaml
-command: '/path/to/other/command'
-```
-
-Edit the `python.d/logind.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/logind.conf
-```
-
-## Notes
-
-- This module's ability to track logins is dependent on what PAM services
- are configured to register sessions with logind. In particular, for
- most systems, it will only track TTY logins, local desktop logins,
- and logins through remote shell connections.
-
-- The users chart counts _usernames_ not UID's. This is potentially
- important in configurations where multiple users have the same UID.
-
-- The users chart counts any given user name up to once for _each_ type
- of login. So if the same user has a graphical and a console login on a
- system, they will show up once in the graphical count, and once in the
- console count.
-
-- Because the data collection process is rather expensive, this plugin
- is currently disabled by default, and needs to be explicitly enabled in
- `/etc/netdata/python.d.conf` before it will run.
-
----
-
-
diff --git a/collectors/python.d.plugin/logind/logind.chart.py b/collectors/python.d.plugin/logind/logind.chart.py
deleted file mode 100644
index 708668649..000000000
--- a/collectors/python.d.plugin/logind/logind.chart.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: logind netdata python.d module
-# Author: Austin S. Hemmelgarn (Ferroin)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-priority = 59999
-disabled_by_default = True
-
-LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend'
-
-ORDER = [
- 'sessions',
- 'users',
- 'seats',
-]
-
-CHARTS = {
- 'sessions': {
- 'options': [None, 'Logind Sessions', 'sessions', 'sessions', 'logind.sessions', 'stacked'],
- 'lines': [
- ['sessions_graphical', 'Graphical', 'absolute', 1, 1],
- ['sessions_console', 'Console', 'absolute', 1, 1],
- ['sessions_remote', 'Remote', 'absolute', 1, 1]
- ]
- },
- 'users': {
- 'options': [None, 'Logind Users', 'users', 'users', 'logind.users', 'stacked'],
- 'lines': [
- ['users_graphical', 'Graphical', 'absolute', 1, 1],
- ['users_console', 'Console', 'absolute', 1, 1],
- ['users_remote', 'Remote', 'absolute', 1, 1]
- ]
- },
- 'seats': {
- 'options': [None, 'Logind Seats', 'seats', 'seats', 'logind.seats', 'line'],
- 'lines': [
- ['seats', 'Active Seats', 'absolute', 1, 1]
- ]
- }
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.command = LOGINCTL_COMMAND
-
- def _get_data(self):
- ret = {
- 'sessions_graphical': 0,
- 'sessions_console': 0,
- 'sessions_remote': 0,
- }
- users = {
- 'graphical': list(),
- 'console': list(),
- 'remote': list()
- }
- seats = list()
- data = self._get_raw_data()
-
- for item in data:
- fields = item.split()
- if len(fields) == 3:
- users['remote'].append(fields[2])
- ret['sessions_remote'] += 1
- elif len(fields) == 4:
- users['graphical'].append(fields[2])
- ret['sessions_graphical'] += 1
- seats.append(fields[3])
- elif len(fields) == 5:
- users['console'].append(fields[2])
- ret['sessions_console'] += 1
- seats.append(fields[3])
-
- ret['users_graphical'] = len(set(users['graphical']))
- ret['users_console'] = len(set(users['console']))
- ret['users_remote'] = len(set(users['remote']))
- ret['seats'] = len(set(seats))
-
- return ret
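
The deleted `_get_data()` above classifies each session purely by how many whitespace-separated fields a `loginctl list-sessions --no-legend` line contains: 3 fields count as a remote session, 4 as a graphical session on a seat, and 5 as a console session. The sample rows below are illustrative only (not captured from a real host; the exact column layout varies between systemd versions):

```bash
loginctl list-sessions --no-legend
#  2 1000 alice seat0 tty2   -> 5 fields: console session, user alice, seat0
#  3 1000 alice seat0        -> 4 fields: graphical session, user alice, seat0
#  7 1001 bob                -> 3 fields: remote session, user bob (no seat)
```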
diff --git a/collectors/python.d.plugin/logind/logind.conf b/collectors/python.d.plugin/logind/logind.conf
deleted file mode 100644
index 01a859d21..000000000
--- a/collectors/python.d.plugin/logind/logind.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-# netdata python.d.plugin configuration for logind
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
diff --git a/collectors/python.d.plugin/megacli/README.md b/collectors/python.d.plugin/megacli/README.md
index 3c99c3de8..3900de381 100644
--- a/collectors/python.d.plugin/megacli/README.md
+++ b/collectors/python.d.plugin/megacli/README.md
@@ -1,7 +1,10 @@
<!--
title: "MegaRAID controller monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/megacli/README.md"
sidebar_label: "MegaRAID controllers"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Devices"
-->
# MegaRAID controller monitoring with Netdata
@@ -53,7 +56,7 @@ systemctl restart netdata.service
## Enable the collector
The `megacli` collector is disabled by default. To enable it, use `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
file.
```bash
@@ -67,7 +70,7 @@ with `sudo systemctl restart netdata`, or the appropriate method for your system
## Configuration
Edit the `python.d/megacli.conf` configuration file using `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -81,6 +84,6 @@ do_battery: yes
```
Save the file and restart the Netdata Agent with `sudo systemctl restart netdata`, or the [appropriate
-method](/docs/configure/start-stop-restart.md) for your system.
+method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
diff --git a/collectors/python.d.plugin/memcached/README.md b/collectors/python.d.plugin/memcached/README.md
index 19139ee92..4158ab19c 100644
--- a/collectors/python.d.plugin/memcached/README.md
+++ b/collectors/python.d.plugin/memcached/README.md
@@ -1,7 +1,10 @@
<!--
title: "Memcached monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/memcached/README.md"
sidebar_label: "Memcached"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Databases"
-->
# Memcached monitoring with Netdata
@@ -76,7 +79,7 @@ Collects memory-caching system performance metrics. It reads server response to
## Configuration
Edit the `python.d/memcached.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/mongodb/Makefile.inc b/collectors/python.d.plugin/mongodb/Makefile.inc
deleted file mode 100644
index 784945aa6..000000000
--- a/collectors/python.d.plugin/mongodb/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += mongodb/mongodb.chart.py
-dist_pythonconfig_DATA += mongodb/mongodb.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += mongodb/README.md mongodb/Makefile.inc
-
diff --git a/collectors/python.d.plugin/mongodb/README.md b/collectors/python.d.plugin/mongodb/README.md
deleted file mode 100644
index b6dd9c5f4..000000000
--- a/collectors/python.d.plugin/mongodb/README.md
+++ /dev/null
@@ -1,210 +0,0 @@
-<!--
-title: "MongoDB monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/mongodb/README.md
-sidebar_label: "MongoDB"
--->
-
-# MongoDB monitoring with Netdata
-
-Monitors performance and health metrics of MongoDB.
-
-## Requirements
-
-- `python-pymongo` package v2.4+.
-
-You need to install it manually.
-
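-For example, depending on your distribution and Python environment, it can typically be installed with `pip` (the distribution package name may differ):
-
-```bash
-sudo pip install pymongo
-```
-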
-The number of charts depends on the MongoDB version, storage engine and other features (replication):
-
-1. **Read requests**:
-
- - query
- - getmore (operation the cursor executes to get additional data from query)
-
-2. **Write requests**:
-
- - insert
- - delete
- - update
-
-3. **Active clients**:
-
- - readers (number of clients with read operations in progress or queued)
- - writers (number of clients with write operations in progress or queued)
-
-4. **Journal transactions**:
-
- - commits (count of transactions that have been written to the journal)
-
-5. **Data written to the journal**:
-
- - volume (volume of data)
-
-6. **Background flush** (MMAPv1):
-
- - average ms (average time taken by flushes to execute)
- - last ms (time taken by the last flush)
-
-7. **Read tickets** (WiredTiger):
-
- - in use (number of read tickets in use)
- - available (number of available read tickets remaining)
-
-8. **Write tickets** (WiredTiger):
-
- - in use (number of write tickets in use)
- - available (number of available write tickets remaining)
-
-9. **Cursors**:
-
-- opened (number of cursors currently opened by MongoDB for clients)
-- timedOut (number of cursors that have timed out)
-- noTimeout (number of open cursors with timeout disabled)
-
-10. **Connections**:
-
- - connected (number of clients currently connected to the database server)
- - unused (number of unused connections available for new clients)
-
-11. **Memory usage metrics**:
-
- - virtual
- - resident (amount of memory used by the database process)
- - mapped
- - non mapped
-
-12. **Page faults**:
-
- - page faults (number of times MongoDB had to fetch data from disk)
-
-13. **Cache metrics** (WiredTiger):
-
- - percentage of bytes currently in the cache (amount of space taken by cached data)
- - percentage of tracked dirty bytes in the cache (amount of space taken by dirty data)
-
-14. **Pages evicted from cache** (WiredTiger):
-
- - modified
- - unmodified
-
-15. **Queued requests**:
-
- - readers (number of read requests currently queued)
- - writers (number of write requests currently queued)
-
-16. **Errors**:
-
- - msg (number of message assertions raised)
- - warning (number of warning assertions raised)
- - regular (number of regular assertions raised)
- - user (number of assertions corresponding to errors generated by users)
-
-17. **Storage metrics** (one chart for every database)
-
- - dataSize (size of all documents + padding in the database)
- - indexSize (size of all indexes in the database)
- - storageSize (size of all extents in the database)
-
-18. **Documents in the database** (one chart for all databases)
-
-- documents (number of objects in the database among all the collections)
-
-19. **tcmalloc metrics**
-
- - central cache free
- - current total thread cache
- - pageheap free
- - pageheap unmapped
- - thread cache free
- - transfer cache free
- - heap size
-
-20. **Commands total/failed rate**
-
- - count
- - createIndex
- - delete
- - eval
- - findAndModify
- - insert
-
-21. **Locks metrics** (acquireCount metrics - number of times the lock was acquired in the specified mode)
-
- - Global lock
- - Database lock
- - Collection lock
- - Metadata lock
- - oplog lock
-
-22. **Replica set members state**
-
- - state
-
-23. **Oplog window**
-
- - window (interval of time between the oldest and the latest entries in the oplog)
-
-24. **Replication lag**
-
- - member (time when last entry from the oplog was applied for every member)
-
-25. **Replication set member heartbeat latency**
-
- - member (time when last heartbeat was received from replica set member)
-
-## Prerequisite
-
-Create a read-only user for Netdata in the admin database.
-
-1. Authenticate as the admin user.
-
-```
-use admin
-db.auth("admin", "<MONGODB_ADMIN_PASSWORD>")
-```
-
-2. Create a user.
-
-```
-# MongoDB 2.x.
-db.addUser("netdata", "<UNIQUE_PASSWORD>", true)
-
-# MongoDB 3.x or higher.
-db.createUser({
- "user":"netdata",
- "pwd": "<UNIQUE_PASSWORD>",
- "roles" : [
- {role: 'read', db: 'admin' },
- {role: 'clusterMonitor', db: 'admin'},
- {role: 'read', db: 'local' }
- ]
-})
-```
-
-## Configuration
-
-Edit the `python.d/mongodb.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/mongodb.conf
-```
-
-Sample:
-
-```yaml
-local:
- name : 'local'
- authdb: 'admin'
- host : '127.0.0.1'
- port : 27017
- user : 'netdata'
- pass : 'netdata'
-```
-
-If no configuration is given, the module will attempt to connect to the MongoDB daemon at `127.0.0.1:27017`.
-
----
-
-
diff --git a/collectors/python.d.plugin/mongodb/mongodb.chart.py b/collectors/python.d.plugin/mongodb/mongodb.chart.py
deleted file mode 100644
index 5e8fec834..000000000
--- a/collectors/python.d.plugin/mongodb/mongodb.chart.py
+++ /dev/null
@@ -1,786 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: mongodb netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import ssl
-
-from copy import deepcopy
-from datetime import datetime
-from sys import exc_info
-
-try:
- from pymongo import MongoClient, ASCENDING, DESCENDING, version_tuple
- from pymongo.errors import PyMongoError
-
- PYMONGO = True
-except ImportError:
- PYMONGO = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-REPL_SET_STATES = [
- ('1', 'primary'),
- ('8', 'down'),
- ('2', 'secondary'),
- ('3', 'recovering'),
- ('5', 'startup2'),
- ('4', 'fatal'),
- ('7', 'arbiter'),
- ('6', 'unknown'),
- ('9', 'rollback'),
- ('10', 'removed'),
- ('0', 'startup')
-]
-
-
-def multiply_by_100(value):
- return value * 100
-
-
-DEFAULT_METRICS = [
- ('opcounters.delete', None, None),
- ('opcounters.update', None, None),
- ('opcounters.insert', None, None),
- ('opcounters.query', None, None),
- ('opcounters.getmore', None, None),
- ('globalLock.activeClients.readers', 'activeClients_readers', None),
- ('globalLock.activeClients.writers', 'activeClients_writers', None),
- ('connections.available', 'connections_available', None),
- ('connections.current', 'connections_current', None),
- ('mem.mapped', None, None),
- ('mem.resident', None, None),
- ('mem.virtual', None, None),
- ('globalLock.currentQueue.readers', 'currentQueue_readers', None),
- ('globalLock.currentQueue.writers', 'currentQueue_writers', None),
- ('asserts.msg', None, None),
- ('asserts.regular', None, None),
- ('asserts.user', None, None),
- ('asserts.warning', None, None),
- ('extra_info.page_faults', None, None),
- ('metrics.record.moves', None, None),
- ('backgroundFlushing.average_ms', None, multiply_by_100),
- ('backgroundFlushing.last_ms', None, multiply_by_100),
- ('backgroundFlushing.flushes', None, multiply_by_100),
- ('metrics.cursor.timedOut', None, None),
- ('metrics.cursor.open.total', 'cursor_total', None),
- ('metrics.cursor.open.noTimeout', None, None),
- ('cursors.timedOut', None, None),
- ('cursors.totalOpen', 'cursor_total', None)
-]
-
-DUR = [
- ('dur.commits', None, None),
- ('dur.journaledMB', None, multiply_by_100)
-]
-
-WIREDTIGER = [
- ('wiredTiger.concurrentTransactions.read.available', 'wiredTigerRead_available', None),
- ('wiredTiger.concurrentTransactions.read.out', 'wiredTigerRead_out', None),
- ('wiredTiger.concurrentTransactions.write.available', 'wiredTigerWrite_available', None),
- ('wiredTiger.concurrentTransactions.write.out', 'wiredTigerWrite_out', None),
- ('wiredTiger.cache.bytes currently in the cache', None, None),
- ('wiredTiger.cache.tracked dirty bytes in the cache', None, None),
- ('wiredTiger.cache.maximum bytes configured', None, None),
- ('wiredTiger.cache.unmodified pages evicted', 'unmodified', None),
- ('wiredTiger.cache.modified pages evicted', 'modified', None)
-]
-
-TCMALLOC = [
- ('tcmalloc.generic.current_allocated_bytes', None, None),
- ('tcmalloc.generic.heap_size', None, None),
- ('tcmalloc.tcmalloc.central_cache_free_bytes', None, None),
- ('tcmalloc.tcmalloc.current_total_thread_cache_bytes', None, None),
- ('tcmalloc.tcmalloc.pageheap_free_bytes', None, None),
- ('tcmalloc.tcmalloc.pageheap_unmapped_bytes', None, None),
- ('tcmalloc.tcmalloc.thread_cache_free_bytes', None, None),
- ('tcmalloc.tcmalloc.transfer_cache_free_bytes', None, None)
-]
-
-COMMANDS = [
- ('metrics.commands.count.total', 'count_total', None),
- ('metrics.commands.createIndexes.total', 'createIndexes_total', None),
- ('metrics.commands.delete.total', 'delete_total', None),
- ('metrics.commands.eval.total', 'eval_total', None),
- ('metrics.commands.findAndModify.total', 'findAndModify_total', None),
- ('metrics.commands.insert.total', 'insert_total', None),
- ('metrics.commands.delete.total', 'delete_total', None),
- ('metrics.commands.count.failed', 'count_failed', None),
- ('metrics.commands.createIndexes.failed', 'createIndexes_failed', None),
- ('metrics.commands.delete.failed', 'delete_failed', None),
- ('metrics.commands.eval.failed', 'eval_failed', None),
- ('metrics.commands.findAndModify.failed', 'findAndModify_failed', None),
- ('metrics.commands.insert.failed', 'insert_failed', None),
- ('metrics.commands.delete.failed', 'delete_failed', None)
-]
-
-LOCKS = [
- ('locks.Collection.acquireCount.R', 'Collection_R', None),
- ('locks.Collection.acquireCount.r', 'Collection_r', None),
- ('locks.Collection.acquireCount.W', 'Collection_W', None),
- ('locks.Collection.acquireCount.w', 'Collection_w', None),
- ('locks.Database.acquireCount.R', 'Database_R', None),
- ('locks.Database.acquireCount.r', 'Database_r', None),
- ('locks.Database.acquireCount.W', 'Database_W', None),
- ('locks.Database.acquireCount.w', 'Database_w', None),
- ('locks.Global.acquireCount.R', 'Global_R', None),
- ('locks.Global.acquireCount.r', 'Global_r', None),
- ('locks.Global.acquireCount.W', 'Global_W', None),
- ('locks.Global.acquireCount.w', 'Global_w', None),
- ('locks.Metadata.acquireCount.R', 'Metadata_R', None),
- ('locks.Metadata.acquireCount.w', 'Metadata_w', None),
- ('locks.oplog.acquireCount.r', 'oplog_r', None),
- ('locks.oplog.acquireCount.w', 'oplog_w', None)
-]
-
-DBSTATS = [
- 'dataSize',
- 'indexSize',
- 'storageSize',
- 'objects'
-]
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'read_operations',
- 'write_operations',
- 'active_clients',
- 'journaling_transactions',
- 'journaling_volume',
- 'background_flush_average',
- 'background_flush_last',
- 'background_flush_rate',
- 'wiredtiger_read',
- 'wiredtiger_write',
- 'cursors',
- 'connections',
- 'memory',
- 'page_faults',
- 'queued_requests',
- 'record_moves',
- 'wiredtiger_cache',
- 'wiredtiger_pages_evicted',
- 'asserts',
- 'locks_collection',
- 'locks_database',
- 'locks_global',
- 'locks_metadata',
- 'locks_oplog',
- 'dbstats_objects',
- 'tcmalloc_generic',
- 'tcmalloc_metrics',
- 'command_total_rate',
- 'command_failed_rate'
-]
-
-CHARTS = {
- 'read_operations': {
- 'options': [None, 'Received read requests', 'requests/s', 'throughput metrics',
- 'mongodb.read_operations', 'line'],
- 'lines': [
- ['query', None, 'incremental'],
- ['getmore', None, 'incremental']
- ]
- },
- 'write_operations': {
- 'options': [None, 'Received write requests', 'requests/s', 'throughput metrics',
- 'mongodb.write_operations', 'line'],
- 'lines': [
- ['insert', None, 'incremental'],
- ['update', None, 'incremental'],
- ['delete', None, 'incremental']
- ]
- },
- 'active_clients': {
- 'options': [None, 'Clients with read or write operations in progress or queued', 'clients',
- 'throughput metrics', 'mongodb.active_clients', 'line'],
- 'lines': [
- ['activeClients_readers', 'readers', 'absolute'],
- ['activeClients_writers', 'writers', 'absolute']
- ]
- },
- 'journaling_transactions': {
- 'options': [None, 'Transactions that have been written to the journal', 'commits',
- 'database performance', 'mongodb.journaling_transactions', 'line'],
- 'lines': [
- ['commits', None, 'absolute']
- ]
- },
- 'journaling_volume': {
- 'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance',
- 'mongodb.journaling_volume', 'line'],
- 'lines': [
- ['journaledMB', 'volume', 'absolute', 1, 100]
- ]
- },
- 'background_flush_average': {
- 'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance',
- 'mongodb.background_flush_average', 'line'],
- 'lines': [
- ['average_ms', 'time', 'absolute', 1, 100]
- ]
- },
- 'background_flush_last': {
- 'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance',
- 'mongodb.background_flush_last', 'line'],
- 'lines': [
- ['last_ms', 'time', 'absolute', 1, 100]
- ]
- },
- 'background_flush_rate': {
- 'options': [None, 'Flushes rate', 'flushes', 'database performance', 'mongodb.background_flush_rate', 'line'],
- 'lines': [
- ['flushes', 'flushes', 'incremental', 1, 1]
- ]
- },
- 'wiredtiger_read': {
- 'options': [None, 'Read tickets in use and remaining', 'tickets', 'database performance',
- 'mongodb.wiredtiger_read', 'stacked'],
- 'lines': [
- ['wiredTigerRead_available', 'available', 'absolute', 1, 1],
- ['wiredTigerRead_out', 'inuse', 'absolute', 1, 1]
- ]
- },
- 'wiredtiger_write': {
- 'options': [None, 'Write tickets in use and remaining', 'tickets', 'database performance',
- 'mongodb.wiredtiger_write', 'stacked'],
- 'lines': [
- ['wiredTigerWrite_available', 'available', 'absolute', 1, 1],
- ['wiredTigerWrite_out', 'inuse', 'absolute', 1, 1]
- ]
- },
- 'cursors': {
- 'options': [None, 'Currently opened cursors, cursors with timeout disabled and timed out cursors',
- 'cursors', 'database performance', 'mongodb.cursors', 'stacked'],
- 'lines': [
- ['cursor_total', 'opened', 'absolute', 1, 1],
- ['noTimeout', None, 'absolute', 1, 1],
- ['timedOut', None, 'incremental', 1, 1]
- ]
- },
- 'connections': {
- 'options': [None, 'Currently connected clients and unused connections', 'connections',
- 'resource utilization', 'mongodb.connections', 'stacked'],
- 'lines': [
- ['connections_available', 'unused', 'absolute', 1, 1],
- ['connections_current', 'connected', 'absolute', 1, 1]
- ]
- },
- 'memory': {
- 'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'],
- 'lines': [
- ['virtual', None, 'absolute', 1, 1],
- ['resident', None, 'absolute', 1, 1],
- ['nonmapped', None, 'absolute', 1, 1],
- ['mapped', None, 'absolute', 1, 1]
- ]
- },
- 'page_faults': {
- 'options': [None, 'Number of times MongoDB had to fetch data from disk', 'request/s',
- 'resource utilization', 'mongodb.page_faults', 'line'],
- 'lines': [
- ['page_faults', None, 'incremental', 1, 1]
- ]
- },
- 'queued_requests': {
- 'options': [None, 'Currently queued read and write requests', 'requests', 'resource saturation',
- 'mongodb.queued_requests', 'line'],
- 'lines': [
- ['currentQueue_readers', 'readers', 'absolute', 1, 1],
- ['currentQueue_writers', 'writers', 'absolute', 1, 1]
- ]
- },
- 'record_moves': {
- 'options': [None, 'Number of times documents had to be moved on-disk', 'number',
- 'resource saturation', 'mongodb.record_moves', 'line'],
- 'lines': [
- ['moves', None, 'incremental', 1, 1]
- ]
- },
- 'asserts': {
- 'options': [
- None,
- 'Number of message, warning, regular, corresponding to errors generated by users assertions raised',
- 'number', 'errors (asserts)', 'mongodb.asserts', 'line'],
- 'lines': [
- ['msg', None, 'incremental', 1, 1],
- ['warning', None, 'incremental', 1, 1],
- ['regular', None, 'incremental', 1, 1],
- ['user', None, 'incremental', 1, 1]
- ]
- },
- 'wiredtiger_cache': {
- 'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
- 'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
- 'lines': [
- ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
- ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
- ]
- },
- 'wiredtiger_pages_evicted': {
- 'options': [None, 'Pages evicted from the cache',
- 'pages', 'resource utilization', 'mongodb.wiredtiger_pages_evicted', 'stacked'],
- 'lines': [
- ['unmodified', None, 'absolute', 1, 1],
- ['modified', None, 'absolute', 1, 1]
- ]
- },
- 'dbstats_objects': {
- 'options': [None, 'Number of documents in the database among all the collections', 'documents',
- 'storage size metrics', 'mongodb.dbstats_objects', 'stacked'],
- 'lines': []
- },
- 'tcmalloc_generic': {
- 'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
- 'lines': [
- ['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20],
- ['heap_size', 'heap_size', 'absolute', 1, 1 << 20]
- ]
- },
- 'tcmalloc_metrics': {
- 'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
- 'lines': [
- ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
- ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],
- ['pageheap_free_bytes', 'pageheap_free', 'absolute', 1, 1024],
- ['pageheap_unmapped_bytes', 'pageheap_unmapped', 'absolute', 1, 1024],
- ['thread_cache_free_bytes', 'thread_cache_free', 'absolute', 1, 1024],
- ['transfer_cache_free_bytes', 'transfer_cache_free', 'absolute', 1, 1024]
- ]
- },
- 'command_total_rate': {
- 'options': [None, 'Commands total rate', 'commands/s', 'commands', 'mongodb.command_total_rate', 'stacked'],
- 'lines': [
- ['count_total', 'count', 'incremental', 1, 1],
- ['createIndexes_total', 'createIndexes', 'incremental', 1, 1],
- ['delete_total', 'delete', 'incremental', 1, 1],
- ['eval_total', 'eval', 'incremental', 1, 1],
- ['findAndModify_total', 'findAndModify', 'incremental', 1, 1],
- ['insert_total', 'insert', 'incremental', 1, 1],
- ['update_total', 'update', 'incremental', 1, 1]
- ]
- },
- 'command_failed_rate': {
- 'options': [None, 'Commands failed rate', 'commands/s', 'commands', 'mongodb.command_failed_rate', 'stacked'],
- 'lines': [
- ['count_failed', 'count', 'incremental', 1, 1],
- ['createIndexes_failed', 'createIndexes', 'incremental', 1, 1],
- ['delete_failed', 'delete', 'incremental', 1, 1],
- ['eval_failed', 'eval', 'incremental', 1, 1],
- ['findAndModify_failed', 'findAndModify', 'incremental', 1, 1],
- ['insert_failed', 'insert', 'incremental', 1, 1],
- ['update_failed', 'update', 'incremental', 1, 1]
- ]
- },
- 'locks_collection': {
- 'options': [None, 'Collection lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_collection', 'stacked'],
- 'lines': [
- ['Collection_R', 'shared', 'incremental'],
- ['Collection_W', 'exclusive', 'incremental'],
- ['Collection_r', 'intent_shared', 'incremental'],
- ['Collection_w', 'intent_exclusive', 'incremental']
- ]
- },
- 'locks_database': {
- 'options': [None, 'Database lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_database', 'stacked'],
- 'lines': [
- ['Database_R', 'shared', 'incremental'],
- ['Database_W', 'exclusive', 'incremental'],
- ['Database_r', 'intent_shared', 'incremental'],
- ['Database_w', 'intent_exclusive', 'incremental']
- ]
- },
- 'locks_global': {
- 'options': [None, 'Global lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_global', 'stacked'],
- 'lines': [
- ['Global_R', 'shared', 'incremental'],
- ['Global_W', 'exclusive', 'incremental'],
- ['Global_r', 'intent_shared', 'incremental'],
- ['Global_w', 'intent_exclusive', 'incremental']
- ]
- },
- 'locks_metadata': {
- 'options': [None, 'Metadata lock. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_metadata', 'stacked'],
- 'lines': [
- ['Metadata_R', 'shared', 'incremental'],
- ['Metadata_w', 'intent_exclusive', 'incremental']
- ]
- },
- 'locks_oplog': {
- 'options': [None, 'Lock on the oplog. Number of times the lock was acquired in the specified mode',
- 'locks', 'locks metrics', 'mongodb.locks_oplog', 'stacked'],
- 'lines': [
- ['oplog_r', 'intent_shared', 'incremental'],
- ['oplog_w', 'intent_exclusive', 'incremental']
- ]
- }
-}
-
-DEFAULT_HOST = '127.0.0.1'
-DEFAULT_PORT = 27017
-DEFAULT_TIMEOUT = 100
-DEFAULT_AUTHDB = 'admin'
-
-CONN_PARAM_HOST = 'host'
-CONN_PARAM_PORT = 'port'
-CONN_PARAM_SERVER_SELECTION_TIMEOUT_MS = 'serverselectiontimeoutms'
-CONN_PARAM_SSL_SSL = 'ssl'
-CONN_PARAM_SSL_CERT_REQS = 'ssl_cert_reqs'
-CONN_PARAM_SSL_CA_CERTS = 'ssl_ca_certs'
-CONN_PARAM_SSL_CRL_FILE = 'ssl_crlfile'
-CONN_PARAM_SSL_CERT_FILE = 'ssl_certfile'
-CONN_PARAM_SSL_KEY_FILE = 'ssl_keyfile'
-CONN_PARAM_SSL_PEM_PASSPHRASE = 'ssl_pem_passphrase'
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER[:]
- self.definitions = deepcopy(CHARTS)
- self.authdb = self.configuration.get('authdb', DEFAULT_AUTHDB)
- self.user = self.configuration.get('user')
- self.password = self.configuration.get('pass')
- self.metrics_to_collect = deepcopy(DEFAULT_METRICS)
- self.connection = None
- self.do_replica = None
- self.databases = list()
-
- def check(self):
- if not PYMONGO:
- self.error('Pymongo package v2.4+ is needed to use mongodb.chart.py')
- return False
- self.connection, server_status, error = self._create_connection()
- if error:
- self.error(error)
- return False
-
- self.build_metrics_to_collect_(server_status)
-
- try:
- data = self._get_data()
- except (LookupError, SyntaxError, AttributeError):
- self.error('Type: %s, error: %s' % (str(exc_info()[0]), str(exc_info()[1])))
- return False
- if isinstance(data, dict) and data:
- self._data_from_check = data
- self.create_charts_(server_status)
- return True
- self.error('_get_data() returned no data or type is not <dict>')
- return False
-
- def build_metrics_to_collect_(self, server_status):
-
- self.do_replica = 'repl' in server_status
- if 'dur' in server_status:
- self.metrics_to_collect.extend(DUR)
- if 'tcmalloc' in server_status:
- self.metrics_to_collect.extend(TCMALLOC)
- if 'commands' in server_status['metrics']:
- self.metrics_to_collect.extend(COMMANDS)
- if 'wiredTiger' in server_status:
- self.metrics_to_collect.extend(WIREDTIGER)
- has_locks = 'locks' in server_status
- if has_locks and 'Collection' in server_status['locks']:
- self.metrics_to_collect.extend(LOCKS)
-
- def create_charts_(self, server_status):
-
- if 'dur' not in server_status:
- self.order.remove('journaling_transactions')
- self.order.remove('journaling_volume')
-
- if 'backgroundFlushing' not in server_status:
- self.order.remove('background_flush_average')
- self.order.remove('background_flush_last')
- self.order.remove('background_flush_rate')
-
- if 'wiredTiger' not in server_status:
- self.order.remove('wiredtiger_write')
- self.order.remove('wiredtiger_read')
- self.order.remove('wiredtiger_cache')
-
- if 'tcmalloc' not in server_status:
- self.order.remove('tcmalloc_generic')
- self.order.remove('tcmalloc_metrics')
-
- if 'commands' not in server_status['metrics']:
- self.order.remove('command_total_rate')
- self.order.remove('command_failed_rate')
-
- has_no_locks = 'locks' not in server_status
- if has_no_locks or 'Collection' not in server_status['locks']:
- self.order.remove('locks_collection')
- self.order.remove('locks_database')
- self.order.remove('locks_global')
- self.order.remove('locks_metadata')
-
- if has_no_locks or 'oplog' not in server_status['locks']:
- self.order.remove('locks_oplog')
-
- for dbase in self.databases:
- self.order.append('_'.join([dbase, 'dbstats']))
- self.definitions['_'.join([dbase, 'dbstats'])] = {
- 'options': [None, '%s: size of all documents, indexes, extents' % dbase, 'KB',
- 'storage size metrics', 'mongodb.dbstats', 'line'],
- 'lines': [
- ['_'.join([dbase, 'dataSize']), 'documents', 'absolute', 1, 1024],
- ['_'.join([dbase, 'indexSize']), 'indexes', 'absolute', 1, 1024],
- ['_'.join([dbase, 'storageSize']), 'extents', 'absolute', 1, 1024]
- ]}
- self.definitions['dbstats_objects']['lines'].append(['_'.join([dbase, 'objects']), dbase, 'absolute'])
-
- if self.do_replica:
- def create_lines(hosts, string):
- lines = list()
- for host in hosts:
- dim_id = '_'.join([host, string])
- lines.append([dim_id, host, 'absolute', 1, 1000])
- return lines
-
- def create_state_lines(states):
- lines = list()
- for state, description in states:
- dim_id = '_'.join([host, 'state', state])
- lines.append([dim_id, description, 'absolute', 1, 1])
- return lines
-
- all_hosts = server_status['repl']['hosts'] + server_status['repl'].get('arbiters', list())
- this_host = server_status['repl']['me']
- other_hosts = [host for host in all_hosts if host != this_host]
-
- if 'local' in self.databases:
- self.order.append('oplog_window')
- self.definitions['oplog_window'] = {
- 'options': [None, 'Interval of time between the oldest and the latest entries in the oplog',
- 'seconds', 'replication and oplog', 'mongodb.oplog_window', 'line'],
- 'lines': [['timeDiff', 'window', 'absolute', 1, 1000]]}
- # Create "heartbeat delay" chart
- self.order.append('heartbeat_delay')
- self.definitions['heartbeat_delay'] = {
- 'options': [
- None,
- 'Time when last heartbeat was received from the replica set member (lastHeartbeatRecv)',
- 'seconds ago', 'replication and oplog', 'mongodb.replication_heartbeat_delay', 'stacked'],
- 'lines': create_lines(other_hosts, 'heartbeat_lag')}
- # Create "optimedate delay" chart
- self.order.append('optimedate_delay')
- self.definitions['optimedate_delay'] = {
- 'options': [None, 'Time when last entry from the oplog was applied (optimeDate)',
- 'seconds ago', 'replication and oplog', 'mongodb.replication_optimedate_delay', 'stacked'],
- 'lines': create_lines(all_hosts, 'optimedate')}
- # Create "replica set members state" chart
- for host in all_hosts:
- chart_name = '_'.join([host, 'state'])
- self.order.append(chart_name)
- self.definitions[chart_name] = {
- 'options': [None, 'Replica set member (%s) current state' % host, 'state',
- 'replication and oplog', 'mongodb.replication_state', 'line'],
- 'lines': create_state_lines(REPL_SET_STATES)}
-
- def _get_raw_data(self):
- raw_data = dict()
-
- raw_data.update(self.get_server_status() or dict())
- raw_data.update(self.get_db_stats() or dict())
- raw_data.update(self.get_repl_set_get_status() or dict())
- raw_data.update(self.get_get_replication_info() or dict())
-
- return raw_data or None
-
- def get_server_status(self):
- raw_data = dict()
- try:
- raw_data['serverStatus'] = self.connection.admin.command('serverStatus')
- except PyMongoError:
- return None
- else:
- return raw_data
-
- def get_db_stats(self):
- if not self.databases:
- return None
-
- raw_data = dict()
- raw_data['dbStats'] = dict()
- try:
- for dbase in self.databases:
- raw_data['dbStats'][dbase] = self.connection[dbase].command('dbStats')
- return raw_data
- except PyMongoError:
- return None
-
- def get_repl_set_get_status(self):
- if not self.do_replica:
- return None
-
- raw_data = dict()
- try:
- raw_data['replSetGetStatus'] = self.connection.admin.command('replSetGetStatus')
- return raw_data
- except PyMongoError:
- return None
-
- def get_get_replication_info(self):
- if not (self.do_replica and 'local' in self.databases):
- return None
-
- raw_data = dict()
- raw_data['getReplicationInfo'] = dict()
- try:
- raw_data['getReplicationInfo']['ASCENDING'] = self.connection.local.oplog.rs.find().sort(
- '$natural', ASCENDING).limit(1)[0]
- raw_data['getReplicationInfo']['DESCENDING'] = self.connection.local.oplog.rs.find().sort(
- '$natural', DESCENDING).limit(1)[0]
- return raw_data
- except PyMongoError:
- return None
-
- def _get_data(self):
- """
- :return: dict
- """
- raw_data = self._get_raw_data()
-
- if not raw_data:
- return None
-
- data = dict()
- serverStatus = raw_data['serverStatus']
- dbStats = raw_data.get('dbStats')
- replSetGetStatus = raw_data.get('replSetGetStatus')
- getReplicationInfo = raw_data.get('getReplicationInfo')
- utc_now = datetime.utcnow()
-
- # serverStatus
- for metric, new_name, func in self.metrics_to_collect:
- value = serverStatus
- for key in metric.split('.'):
- try:
- value = value[key]
- except KeyError:
- break
-
- if not isinstance(value, dict) and key:
- data[new_name or key] = value if not func else func(value)
-
- if 'mapped' in serverStatus['mem']:
- data['nonmapped'] = data['virtual'] - serverStatus['mem'].get('mappedWithJournal', data['mapped'])
-
- if data.get('maximum bytes configured'):
- maximum = data['maximum bytes configured']
- data['wiredTiger_percent_clean'] = int(data['bytes currently in the cache'] * 100 / maximum * 1000)
- data['wiredTiger_percent_dirty'] = int(data['tracked dirty bytes in the cache'] * 100 / maximum * 1000)
-
- # dbStats
- if dbStats:
- for dbase in dbStats:
- for metric in DBSTATS:
- key = '_'.join([dbase, metric])
- data[key] = dbStats[dbase][metric]
-
- # replSetGetStatus
- if replSetGetStatus:
- other_hosts = list()
- members = replSetGetStatus['members']
- unix_epoch = datetime(1970, 1, 1, 0, 0)
-
- for member in members:
- if not member.get('self'):
- other_hosts.append(member)
-
- # Replica set time diff between current time and time when last entry from the oplog was applied
- if member.get('optimeDate', unix_epoch) != unix_epoch:
- member_optimedate = member['name'] + '_optimedate'
- delta = utc_now - member['optimeDate']
- data[member_optimedate] = int(delta_calculation(delta=delta, multiplier=1000))
-
- # Replica set members state
- member_state = member['name'] + '_state'
- for elem in REPL_SET_STATES:
- state = elem[0]
- data.update({'_'.join([member_state, state]): 0})
- data.update({'_'.join([member_state, str(member['state'])]): member['state']})
-
- # Heartbeat lag calculation
- for other in other_hosts:
- if other['lastHeartbeatRecv'] != unix_epoch:
- node = other['name'] + '_heartbeat_lag'
- delta = utc_now - other['lastHeartbeatRecv']
- data[node] = int(delta_calculation(delta=delta, multiplier=1000))
-
- if getReplicationInfo:
- first_event = getReplicationInfo['ASCENDING']['ts'].as_datetime()
- last_event = getReplicationInfo['DESCENDING']['ts'].as_datetime()
- data['timeDiff'] = int(delta_calculation(delta=last_event - first_event, multiplier=1000))
-
- return data
-
- def build_ssl_connection_params(self):
- conf = self.configuration
-
- def cert_req(v):
- if v is None:
- return None
- if not v:
- return ssl.CERT_NONE
- return ssl.CERT_REQUIRED
-
- ssl_params = {
- CONN_PARAM_SSL_SSL: conf.get(CONN_PARAM_SSL_SSL),
- CONN_PARAM_SSL_CERT_REQS: cert_req(conf.get(CONN_PARAM_SSL_CERT_REQS)),
- CONN_PARAM_SSL_CA_CERTS: conf.get(CONN_PARAM_SSL_CA_CERTS),
- CONN_PARAM_SSL_CRL_FILE: conf.get(CONN_PARAM_SSL_CRL_FILE),
- CONN_PARAM_SSL_CERT_FILE: conf.get(CONN_PARAM_SSL_CERT_FILE),
- CONN_PARAM_SSL_KEY_FILE: conf.get(CONN_PARAM_SSL_KEY_FILE),
- CONN_PARAM_SSL_PEM_PASSPHRASE: conf.get(CONN_PARAM_SSL_PEM_PASSPHRASE),
- }
-
- ssl_params = dict((k, v) for k, v in ssl_params.items() if v is not None)
-
- return ssl_params
-
- def build_connection_params(self):
- conf = self.configuration
- params = {
- CONN_PARAM_HOST: conf.get(CONN_PARAM_HOST, DEFAULT_HOST),
- CONN_PARAM_PORT: conf.get(CONN_PARAM_PORT, DEFAULT_PORT),
- }
- if hasattr(MongoClient, 'server_selection_timeout') or version_tuple[0] >= 4:
- params[CONN_PARAM_SERVER_SELECTION_TIMEOUT_MS] = conf.get('timeout', DEFAULT_TIMEOUT)
-
- params.update(self.build_ssl_connection_params())
- return params
-
- def _create_connection(self):
- params = self.build_connection_params()
- self.debug('creating connection, connection params: {0}'.format(sorted(params)))
-
- try:
- connection = MongoClient(**params)
- if self.user and self.password:
- self.debug('authenticating, user: {0}, password: {1}'.format(self.user, self.password))
- getattr(connection, self.authdb).authenticate(name=self.user, password=self.password)
- else:
- self.debug('skip authenticating, user and password are not set')
- # elif self.user:
- # connection.admin.authenticate(name=self.user, mechanism='MONGODB-X509')
- server_status = connection.admin.command('serverStatus')
- except PyMongoError as error:
- return None, None, str(error)
- else:
- try:
- self.databases = connection.database_names()
- except PyMongoError as error:
- self.info('Can\'t collect databases: %s' % str(error))
- return connection, server_status, None
-
-
-def delta_calculation(delta, multiplier=1):
- if hasattr(delta, 'total_seconds'):
- return delta.total_seconds() * multiplier
- return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6 * multiplier
diff --git a/collectors/python.d.plugin/mongodb/mongodb.conf b/collectors/python.d.plugin/mongodb/mongodb.conf
deleted file mode 100644
index 9f660f594..000000000
--- a/collectors/python.d.plugin/mongodb/mongodb.conf
+++ /dev/null
@@ -1,102 +0,0 @@
-# netdata python.d.plugin configuration for mongodb
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, mongodb also supports the following:
-#
-# host: 'IP or HOSTNAME' # type <str> the host to connect to
-# port: PORT # type <int> the port to connect to
-#
-# in all cases, the following can also be set:
-#
-# authdb: 'dbname' # database to authenticate the user against,
-# # defaults to "admin".
-# user: 'username' # the mongodb username to use
-# pass: 'password' # the mongodb password to use
-#
-# SSL connection parameters (https://api.mongodb.com/python/current/examples/tls.html):
-#
-# ssl: yes # connect to the server using TLS
-# ssl_cert_reqs: yes # require a certificate from the server when TLS is enabled
-# ssl_ca_certs: '/path/to/ca.pem' # use a specific set of CA certificates
-# ssl_crlfile: '/path/to/crl.pem' # use a certificate revocation lists
-# ssl_certfile: '/path/to/client.pem' # use a client certificate
-# ssl_keyfile: '/path/to/key.pem' # use a specific client certificate key
-# ssl_pem_passphrase: 'passphrase' # use a passphrase to decrypt encrypted private keys
-#
-
-# ----------------------------------------------------------------------
-# to connect to the mongodb on localhost, without a password:
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- name : 'local'
- host : '127.0.0.1'
- port : 27017
-
-# authsample:
-# name : 'secure'
-# host : 'mongodb.example.com'
-# port : 27017
-# authdb : 'admin'
-# user : 'monitor'
-# pass : 'supersecret'
diff --git a/collectors/python.d.plugin/monit/README.md b/collectors/python.d.plugin/monit/README.md
index 13960256b..816143ebf 100644
--- a/collectors/python.d.plugin/monit/README.md
+++ b/collectors/python.d.plugin/monit/README.md
@@ -1,34 +1,40 @@
<!--
title: "Monit monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/monit/README.md"
sidebar_label: "Monit"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Storage"
-->
# Monit monitoring with Netdata
-Monit monitoring module. Data is grabbed from stats XML interface (exists for a long time, but not mentioned in official documentation). Mostly this plugin shows statuses of monit targets, i.e. [statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
+Monit monitoring module. Data is grabbed from the stats XML interface (it has been available for a long time, but is not
+mentioned in the official documentation). Mostly this plugin shows the status of Monit targets, i.e.
+[statuses of specified checks](https://mmonit.com/monit/documentation/monit.html#Service-checks).
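+
+For reference, the same XML status that this module parses can usually be fetched manually, e.g. with the sample credentials from the Configuration section below:
+
+```bash
+curl -u admin:monit 'http://localhost:2812/_status?format=xml'
+```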
-1. **Filesystems**
+1. **Filesystems**
- - Filesystems
- - Directories
- - Files
- - Pipes
+ - Filesystems
+ - Directories
+ - Files
+ - Pipes
-2. **Applications**
+2. **Applications**
- - Processes (+threads/childs)
- - Programs
+ - Processes (+threads/childs)
+ - Programs
-3. **Network**
+3. **Network**
- - Hosts (+latency)
- - Network interfaces
+ - Hosts (+latency)
+ - Network interfaces
## Configuration
-Edit the `python.d/monit.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Edit the `python.d/monit.conf` configuration file using `edit-config` from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically
+at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -39,10 +45,10 @@ Sample:
```yaml
local:
- name : 'local'
- url : 'http://localhost:2812'
- user: : admin
- pass: : monit
+    name: 'local'
+    url: 'http://localhost:2812'
+    user: 'admin'
+    pass: 'monit'
```
If no configuration is given, the module will attempt to connect to Monit at `http://localhost:2812`.
diff --git a/collectors/python.d.plugin/nsd/README.md b/collectors/python.d.plugin/nsd/README.md
index e5183aeb7..f99726c30 100644
--- a/collectors/python.d.plugin/nsd/README.md
+++ b/collectors/python.d.plugin/nsd/README.md
@@ -1,7 +1,10 @@
<!--
title: "NSD monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nsd/README.md"
sidebar_label: "NSD"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Networking"
-->
# NSD monitoring with Netdata
diff --git a/collectors/python.d.plugin/ntpd/README.md b/collectors/python.d.plugin/ntpd/README.md
index 9832707bd..8ae923da5 100644
--- a/collectors/python.d.plugin/ntpd/README.md
+++ b/collectors/python.d.plugin/ntpd/README.md
@@ -1,90 +1,14 @@
<!--
title: "NTP daemon monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ntpd/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/ntpd/README.md"
sidebar_label: "NTP daemon"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Qos"
-->
# NTP daemon monitoring with Netdata
-Monitors the system variables of the local `ntpd` daemon (optionally including variables of the polled peers) using the NTP Control Message Protocol via UDP socket, similar to `ntpq`, the [standard NTP query program](http://doc.ntp.org/current-stable/ntpq.html).
-
-## Requirements
-
-- Version: `NTPv4`
-- Local interrogation allowed in `/etc/ntp.conf` (default):
-
-```
-# Local users may interrogate the ntp server more closely.
-restrict 127.0.0.1
-restrict ::1
-```
-
-It produces:
-
-1. system
-
- - offset
- - jitter
- - frequency
- - delay
- - dispersion
- - stratum
- - tc
- - precision
-
-2. peers
-
- - offset
- - delay
- - dispersion
- - jitter
- - rootdelay
- - rootdispersion
- - stratum
- - hmode
- - pmode
- - hpoll
- - ppoll
- - precision
-
-## Configuration
-
-Edit the `python.d/ntpd.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/ntpd.conf
-```
-
-Sample:
-
-```yaml
-update_every: 10
-
-host: 'localhost'
-port: '123'
-show_peers: yes
-# hide peers with source address in ranges 127.0.0.0/8 and 192.168.0.0/16
-peer_filter: '(127\..*)|(192\.168\..*)'
-# check for new/changed peers every 60 updates
-peer_rescan: 60
-```
-
-Sample (multiple jobs):
-
-Note: `ntp.conf` on the host `otherhost` must be configured to allow queries from our local host by including a line like `restrict <IP> nomodify notrap nopeer`.
-
-```yaml
-local:
- host: 'localhost'
-
-otherhost:
- host: 'otherhost'
-```
-
-If no configuration is given, the module will attempt to connect to `ntpd` on `::1:123` or `127.0.0.1:123` and show charts for the system variables. Use `show_peers: yes` to also show the charts for configured peers. Local peers in the range `127.0.0.0/8` are hidden by default; use `peer_filter: ''` to show all peers.
-
----
-
-
+This collector is deprecated.
+Use [go.d/ntpd](https://github.com/netdata/go.d.plugin/tree/master/modules/ntpd#ntp-daemon-monitoring-with-netdata)
+instead.
\ No newline at end of file
diff --git a/collectors/python.d.plugin/ntpd/ntpd.chart.py b/collectors/python.d.plugin/ntpd/ntpd.chart.py
index 275d2276c..077124b4f 100644
--- a/collectors/python.d.plugin/ntpd/ntpd.chart.py
+++ b/collectors/python.d.plugin/ntpd/ntpd.chart.py
@@ -9,6 +9,8 @@ import struct
from bases.FrameworkServices.SocketService import SocketService
+disabled_by_default = True
+
# NTP Control Message Protocol constants
MODE = 6
HEADER_FORMAT = '!BBHHHHH'
diff --git a/collectors/python.d.plugin/nvidia_smi/README.md b/collectors/python.d.plugin/nvidia_smi/README.md
index bb4169441..ce5473c26 100644
--- a/collectors/python.d.plugin/nvidia_smi/README.md
+++ b/collectors/python.d.plugin/nvidia_smi/README.md
@@ -1,14 +1,17 @@
<!--
title: "Nvidia GPU monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nvidia_smi/README.md
-sidebar_label: "Nvidia GPUs"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nvidia_smi/README.md"
+sidebar_label: "nvidia_smi-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Devices"
-->
# Nvidia GPU monitoring with Netdata
Monitors performance metrics (memory usage, fan speed, pcie bandwidth utilization, temperature, etc.) using `nvidia-smi` cli tool.
-> **Warning**: this collector does not work when the Netdata Agent is [running in a container](https://learn.netdata.cloud/docs/agent/packaging/docker).
+> **Warning**: this collector does not work when the Netdata Agent is [running in a container](https://github.com/netdata/netdata/blob/master/packaging/docker/README.md).
## Requirements and Notes
@@ -48,7 +51,7 @@ It produces the following charts:
## Configuration
Edit the `python.d/nvidia_smi.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
index 23e90e658..6affae7b8 100644
--- a/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ b/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
@@ -22,6 +22,7 @@ EMPTY_ROW_LIMIT = 500
POLLER_BREAK_ROW = '</nvidia_smi_log>'
PCI_BANDWIDTH = 'pci_bandwidth'
+PCI_BANDWIDTH_PERCENT = 'pci_bandwidth_percent'
FAN_SPEED = 'fan_speed'
GPU_UTIL = 'gpu_utilization'
MEM_UTIL = 'mem_utilization'
@@ -38,6 +39,7 @@ USER_NUM = 'user_num'
ORDER = [
PCI_BANDWIDTH,
+ PCI_BANDWIDTH_PERCENT,
FAN_SPEED,
GPU_UTIL,
MEM_UTIL,
@@ -56,7 +58,22 @@ ORDER = [
# https://docs.nvidia.com/gameworks/content/gameworkslibrary/coresdk/nvapi/group__gpupstate.html
POWER_STATES = ['P' + str(i) for i in range(0, 16)]
-
+# PCI Transfer data rate in gigabits per second (Gb/s) per generation
+PCI_SPEED = {
+ "1": 2.5,
+ "2": 5,
+ "3": 8,
+ "4": 16,
+ "5": 32
+}
+# PCI encoding per generation
+PCI_ENCODING = {
+ "1": 2/10,
+ "2": 2/10,
+ "3": 2/130,
+ "4": 2/130,
+ "5": 2/130
+}
def gpu_charts(gpu):
fam = gpu.full_name()
@@ -68,6 +85,13 @@ def gpu_charts(gpu):
['tx_util', 'tx', 'absolute', 1, -1],
]
},
+ PCI_BANDWIDTH_PERCENT: {
+ 'options': [None, 'PCI Express Bandwidth Percent', 'percentage', fam, 'nvidia_smi.pci_bandwidth_percent', 'area'],
+ 'lines': [
+ ['rx_util_percent', 'rx_percent'],
+ ['tx_util_percent', 'tx_percent'],
+ ]
+ },
FAN_SPEED: {
'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
'lines': [
@@ -327,6 +351,24 @@ class GPU:
return 'gpu{0} {1}'.format(self.num, self.name())
@handle_attr_error
+ def pci_link_gen(self):
+ return self.root.find('pci').find('pci_gpu_link_info').find('pcie_gen').find('max_link_gen').text
+
+ @handle_attr_error
+ def pci_link_width(self):
+ return self.root.find('pci').find('pci_gpu_link_info').find('link_widths').find('max_link_width').text.split('x')[0]
+
+ def pci_bw_max(self):
+ link_gen = self.pci_link_gen()
+ link_width = int(self.pci_link_width())
+ if link_gen not in PCI_SPEED or link_gen not in PCI_ENCODING or not link_width:
+ return None
+ # Maximum PCIe Bandwidth = SPEED * WIDTH * (1 - ENCODING) - 1Gb/s.
+ # see details https://enterprise-support.nvidia.com/s/article/understanding-pcie-configuration-for-maximum-performance
+ # return max bandwidth in kilobytes per second (kB/s)
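+        # For example (illustrative values): a Gen3 x16 link gives
+        # (8 * 16 * (1 - 2/130) - 1) * 1000 * 1000 / 8, roughly 15.6 million kB/s (~15.6 GB/s).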
+ return (PCI_SPEED[link_gen] * link_width * (1- PCI_ENCODING[link_gen]) - 1) * 1000 * 1000 / 8
+
+ @handle_attr_error
def rx_util(self):
return self.root.find('pci').find('rx_util').text.split()[0]
@@ -439,6 +481,15 @@ class GPU:
'power_draw': self.power_draw(),
}
+ pci_bw_max = self.pci_bw_max()
+ if not pci_bw_max:
+ data['rx_util_percent'] = 0
+ data['tx_util_percent'] = 0
+ else :
+ data['rx_util_percent'] = str(int(int(self.rx_util())*100/self.pci_bw_max()))
+ data['tx_util_percent'] = str(int(int(self.tx_util())*100/self.pci_bw_max()))
+
+
for v in POWER_STATES:
data['power_state_' + v.lower()] = 0
p_state = self.power_state()
diff --git a/collectors/python.d.plugin/openldap/README.md b/collectors/python.d.plugin/openldap/README.md
index b0cd1db42..4f29bbb49 100644
--- a/collectors/python.d.plugin/openldap/README.md
+++ b/collectors/python.d.plugin/openldap/README.md
@@ -1,7 +1,10 @@
<!--
title: "OpenLDAP monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/openldap/README.md"
sidebar_label: "OpenLDAP"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Networking"
-->
# OpenLDAP monitoring with Netdata
@@ -56,7 +59,7 @@ Statistics are taken from LDAP monitoring interface. Manual page, slapd-monitor(
## Configuration
Edit the `python.d/openldap.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
index 88024f8c5..78f807d61 100644
--- a/collectors/python.d.plugin/oracledb/README.md
+++ b/collectors/python.d.plugin/oracledb/README.md
@@ -1,7 +1,10 @@
<!--
title: "OracleDB monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/oracledb/README.md"
sidebar_label: "OracleDB"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Databases"
-->
# OracleDB monitoring with Netdata
@@ -71,7 +74,7 @@ GRANT SELECT_CATALOG_ROLE TO netdata;
## Configuration
Edit the `python.d/oracledb.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/postfix/README.md b/collectors/python.d.plugin/postfix/README.md
index 1a546c614..8d646ad51 100644
--- a/collectors/python.d.plugin/postfix/README.md
+++ b/collectors/python.d.plugin/postfix/README.md
@@ -1,7 +1,10 @@
<!--
title: "Postfix monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/postfix/README.md"
sidebar_label: "Postfix"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# Postfix monitoring with Netdata
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
index 8c6a394f1..d6c626b51 100644
--- a/collectors/python.d.plugin/proxysql/README.md
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -1,106 +1,14 @@
<!--
title: "ProxySQL monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/proxysql/README.md
-sidebar_label: "ProxySQL"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/proxysql/README.md"
+sidebar_label: "proxysql-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Databases"
-->
# ProxySQL monitoring with Netdata
-Monitors database backend and frontend performance metrics.
-
-## Requirements
-
-- python library [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) (faster) or [PyMySQL](https://github.com/PyMySQL/PyMySQL) (slower)
-- `netdata` local user to connect to the ProxySQL server.
-
-To create the `netdata` user, follow [the documentation](https://github.com/sysown/proxysql/wiki/Users-configuration#creating-a-new-user).
-
-## Charts
-
-It produces:
-
-1. **Connections (frontend)**
-
- - connected: number of frontend connections currently connected
- - aborted: number of frontend connections aborted due to invalid credential or max_connections reached
- - non_idle: number of frontend connections that are not currently idle
- - created: number of frontend connections created
-
-2. **Questions (frontend)**
-
- - questions: total number of queries sent from frontends
- - slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in global variable `mysql-long_query_time`
-
-3. **Overall Bandwidth (backends)**
-
- - in
- - out
-
-4. **Status (backends)**
-
- - Backends
- - `1=ONLINE`: backend server is fully operational
- - `2=SHUNNED`: backend sever is temporarily taken out of use because of either too many connection errors in a time that was too short, or replication lag exceeded the allowed threshold
- - `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections aren't accepted anymore, while the existing connections are kept until they became inactive. In other words, connections are kept in use until the current transaction is completed. This allows to gracefully detach a backend
- - `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, the existing connections are dropped, while new incoming connections aren't accepted either. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work
- - `-1`: Unknown status
-
-5. **Bandwidth (backends)**
-
- - Backends
- - in
- - out
-
-6. **Queries (backends)**
-
- - Backends
- - queries
-
-7. **Latency (backends)**
-
- - Backends
- - ping time
-
-8. **Pool connections (backends)**
-
- - Backends
- - Used: The number of connections are currently used by ProxySQL for sending queries to the backend server.
- - Free: The number of connections are currently free.
- - Established/OK: The number of connections were established successfully.
- - Error: The number of connections weren't established successfully.
-
-9. **Commands**
-
- - Commands
- - Count
- - Duration (Total duration for each command)
-
-10. **Commands Histogram**
-
- - Commands
- - 100us, 500us, ..., 10s, inf: the total number of commands of the given type which executed within the specified time limit and the previous one.
-
-## Configuration
-
-Edit the `python.d/proxysql.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/proxysql.conf
-```
-
-```yaml
-tcpipv4:
- name : 'local'
- user : 'stats'
- pass : 'stats'
- host : '127.0.0.1'
- port : '6032'
-```
-
-If no configuration is given, module will fail to run.
-
----
-
-
+This collector is deprecated.
+Use [go.d/proxysql](https://github.com/netdata/go.d.plugin/tree/master/modules/proxysql#proxysql-monitoring-with-netdata)
+instead. \ No newline at end of file
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
index 982c28ee7..7e06b7bdc 100644
--- a/collectors/python.d.plugin/proxysql/proxysql.chart.py
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -6,6 +6,8 @@
from bases.FrameworkServices.MySQLService import MySQLService
+disabled_by_default = True
+
def query(table, *params):
return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
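The module-level `disabled_by_default = True` flag added above is how python.d.plugin marks the deprecated proxysql collector as opt-in: the job is skipped unless the user explicitly enables it in `python.d.conf`. A minimal sketch of that pattern follows (hypothetical loader logic, not the actual plugin internals):

```python
# Hypothetical sketch of how a loader can honor a module-level
# "disabled_by_default" flag; not the real python.d.plugin code.
import importlib

def job_enabled(module_name, user_conf):
    mod = importlib.import_module(module_name)
    # Modules that set disabled_by_default = True run only when the user
    # explicitly turns them on (e.g. "proxysql: yes" in python.d.conf).
    default = not getattr(mod, 'disabled_by_default', False)
    return bool(user_conf.get(module_name, default))
```

With the flag in place, the deprecated collector stays installed but silent unless someone opts back in.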
diff --git a/collectors/python.d.plugin/puppet/README.md b/collectors/python.d.plugin/puppet/README.md
index 1b06d181b..8b98b8a2d 100644
--- a/collectors/python.d.plugin/puppet/README.md
+++ b/collectors/python.d.plugin/puppet/README.md
@@ -1,7 +1,10 @@
<!--
title: "Puppet monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/puppet/README.md"
sidebar_label: "Puppet"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Provisioning tools"
-->
# Puppet monitoring with Netdata
@@ -33,7 +36,7 @@ Following charts are drawn:
## Configuration
Edit the `python.d/puppet.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/python.d.conf b/collectors/python.d.plugin/python.d.conf
index 7b43ee205..41385dac6 100644
--- a/collectors/python.d.plugin/python.d.conf
+++ b/collectors/python.d.plugin/python.d.conf
@@ -34,7 +34,6 @@ gc_interval: 300
# boinc: yes
# ceph: yes
# changefinder: no
-# dockerd: yes
# dovecot: yes
# this is just an example
@@ -51,7 +50,6 @@ hpssa: no
# icecast: yes
# ipfs: yes
# litespeed: yes
-logind: no
# megacli: yes
# memcached: yes
# mongodb: yes
@@ -73,7 +71,6 @@ logind: no
# sensors: yes
# smartd_log: yes
# spigotmc: yes
-# springboot: yes
# squid: yes
# traefik: yes
# tomcat: yes
diff --git a/collectors/python.d.plugin/rabbitmq/README.md b/collectors/python.d.plugin/rabbitmq/README.md
index 927adcc68..19df65694 100644
--- a/collectors/python.d.plugin/rabbitmq/README.md
+++ b/collectors/python.d.plugin/rabbitmq/README.md
@@ -1,7 +1,10 @@
<!--
title: "RabbitMQ monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rabbitmq/README.md
-sidebar_label: "RabbitMQ"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rabbitmq/README.md"
+sidebar_label: "rabbitmq-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Message brokers"
-->
# RabbitMQ monitoring with Netdata
@@ -93,7 +96,7 @@ Per Vhost charts:
## Configuration
Edit the `python.d/rabbitmq.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/rethinkdbs/README.md b/collectors/python.d.plugin/rethinkdbs/README.md
index d3fa3553a..578c1c0b1 100644
--- a/collectors/python.d.plugin/rethinkdbs/README.md
+++ b/collectors/python.d.plugin/rethinkdbs/README.md
@@ -1,7 +1,10 @@
<!--
title: "RethinkDB monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/rethinkdbs/README.md"
sidebar_label: "RethinkDB"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Databases"
-->
# RethinkDB monitoring with Netdata
@@ -10,27 +13,28 @@ Collects database server and cluster statistics.
Following charts are drawn:
-1. **Connected Servers**
+1. **Connected Servers**
- - connected
- - missing
+ - connected
+ - missing
-2. **Active Clients**
+2. **Active Clients**
- - active
+ - active
-3. **Queries** per second
+3. **Queries** per second
- - queries
+ - queries
-4. **Documents** per second
+4. **Documents** per second
- - documents
+ - documents
## Configuration
-Edit the `python.d/rethinkdbs.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Edit the `python.d/rethinkdbs.conf` configuration file using `edit-config` from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically
+at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -39,11 +43,11 @@ sudo ./edit-config python.d/rethinkdbs.conf
```yaml
localhost:
- name : 'local'
- host : '127.0.0.1'
- port : 28015
- user : "user"
- password : "pass"
+ name: 'local'
+ host: '127.0.0.1'
+ port: 28015
+ user: "user"
+ password: "pass"
```
When no configuration file is found, module tries to connect to `127.0.0.1:28015`.
diff --git a/collectors/python.d.plugin/retroshare/README.md b/collectors/python.d.plugin/retroshare/README.md
index 297df9fca..142b7d5bf 100644
--- a/collectors/python.d.plugin/retroshare/README.md
+++ b/collectors/python.d.plugin/retroshare/README.md
@@ -1,7 +1,10 @@
<!--
title: "RetroShare monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/retroshare/README.md"
sidebar_label: "RetroShare"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Apm"
-->
# RetroShare monitoring with Netdata
@@ -22,7 +25,7 @@ This module produces the following charts:
## Configuration
Edit the `python.d/retroshare.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/riakkv/README.md b/collectors/python.d.plugin/riakkv/README.md
index fe62c6718..5e533a419 100644
--- a/collectors/python.d.plugin/riakkv/README.md
+++ b/collectors/python.d.plugin/riakkv/README.md
@@ -1,7 +1,10 @@
<!--
title: "Riak KV monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/riakkv/README.md"
sidebar_label: "Riak KV"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Databases"
-->
# Riak KV monitoring with Netdata
@@ -103,7 +106,7 @@ listed
## Configuration
Edit the `python.d/riakkv.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/samba/README.md b/collectors/python.d.plugin/samba/README.md
index 767df12de..41ae1c5ba 100644
--- a/collectors/python.d.plugin/samba/README.md
+++ b/collectors/python.d.plugin/samba/README.md
@@ -1,7 +1,10 @@
<!--
title: "Samba monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/samba/README.md"
sidebar_label: "Samba"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Apps"
-->
# Samba monitoring with Netdata
@@ -95,7 +98,7 @@ systemctl restart netdata.service
## Enable the collector
The `samba` collector is disabled by default. To enable it, use `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`, to edit the `python.d.conf`
file.
```bash
@@ -104,12 +107,12 @@ sudo ./edit-config python.d.conf
```
Change the value of the `samba` setting to `yes`. Save the file and restart the Netdata Agent with `sudo systemctl
-restart netdata`, or the [appropriate method](/docs/configure/start-stop-restart.md) for your system.
+restart netdata`, or the [appropriate method](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for your system.
## Configuration
Edit the `python.d/samba.conf` configuration file using `edit-config` from the
-Netdata [config directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/sensors/README.md b/collectors/python.d.plugin/sensors/README.md
index e791195d4..f5f435854 100644
--- a/collectors/python.d.plugin/sensors/README.md
+++ b/collectors/python.d.plugin/sensors/README.md
@@ -1,7 +1,10 @@
<!--
title: "Linux machine sensors monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/README.md
-sidebar_label: "Linux machine sensors"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/sensors/README.md"
+sidebar_label: "sensors-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Devices"
-->
# Linux machine sensors monitoring with Netdata
@@ -13,7 +16,7 @@ Charts are created dynamically.
## Configuration
Edit the `python.d/sensors.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -26,7 +29,7 @@ There have been reports from users that on certain servers, ACPI ring buffer err
We are tracking such cases in issue [#827](https://github.com/netdata/netdata/issues/827).
Please join this discussion for help.
-When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures), use [the legacy bash collector](https://learn.netdata.cloud/docs/agent/collectors/charts.d.plugin/sensors)
+When `lm-sensors` doesn't work on your device (e.g. for RPi temperatures), use [the legacy bash collector](https://github.com/netdata/netdata/blob/master/collectors/charts.d.plugin/sensors/README.md)
---
diff --git a/collectors/python.d.plugin/smartd_log/README.md b/collectors/python.d.plugin/smartd_log/README.md
index eef34ce43..7c1e845f8 100644
--- a/collectors/python.d.plugin/smartd_log/README.md
+++ b/collectors/python.d.plugin/smartd_log/README.md
@@ -1,7 +1,10 @@
<!--
title: "Storage devices monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/smartd_log/README.md"
sidebar_label: "S.M.A.R.T. attributes"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Devices"
-->
# Storage devices monitoring with Netdata
@@ -106,7 +109,7 @@ Otherwise, all the smartd `.csv` files may get written to `/var/lib/smartmontool
## Configuration
Edit the `python.d/smartd_log.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/spigotmc/README.md b/collectors/python.d.plugin/spigotmc/README.md
index 06483188b..6d8e4b62b 100644
--- a/collectors/python.d.plugin/spigotmc/README.md
+++ b/collectors/python.d.plugin/spigotmc/README.md
@@ -1,7 +1,10 @@
<!--
title: "SpigotMC monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/spigotmc/README.md"
sidebar_label: "SpigotMC"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# SpigotMC monitoring with Netdata
@@ -18,7 +21,7 @@ the data returned by the `tps` or `list` console commands.
## Configuration
Edit the `python.d/spigotmc.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/springboot/Makefile.inc b/collectors/python.d.plugin/springboot/Makefile.inc
deleted file mode 100644
index 06775f937..000000000
--- a/collectors/python.d.plugin/springboot/Makefile.inc
+++ /dev/null
@@ -1,13 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-# THIS IS NOT A COMPLETE Makefile
-# IT IS INCLUDED BY ITS PARENT'S Makefile.am
-# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
-
-# install these files
-dist_python_DATA += springboot/springboot.chart.py
-dist_pythonconfig_DATA += springboot/springboot.conf
-
-# do not install these files, but include them in the distribution
-dist_noinst_DATA += springboot/README.md springboot/Makefile.inc
-
diff --git a/collectors/python.d.plugin/springboot/README.md b/collectors/python.d.plugin/springboot/README.md
deleted file mode 100644
index cdbc9a900..000000000
--- a/collectors/python.d.plugin/springboot/README.md
+++ /dev/null
@@ -1,145 +0,0 @@
-<!--
-title: "Java Spring Boot 2 application monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/springboot/README.md
-sidebar_label: "Java Spring Boot 2 applications"
--->
-
-# Java Spring Boot 2 application monitoring with Netdata
-
-Monitors one or more Java Spring-boot applications depending on configuration.
-Netdata can be used to monitor running Java [Spring Boot](https://spring.io/) applications that expose their metrics with the use of the **Spring Boot Actuator** included in Spring Boot library.
-
-## Configuration
-
-The Spring Boot Actuator exposes these metrics over HTTP and is very easy to use:
-
-- add `org.springframework.boot:spring-boot-starter-actuator` to your application dependencies
-- set `endpoints.metrics.sensitive=false` in your `application.properties`
-
-You can create custom Metrics by add and inject a PublicMetrics in your application.
-This is a example to add custom metrics:
-
-```java
-package com.example;
-
-import org.springframework.boot.actuate.endpoint.PublicMetrics;
-import org.springframework.boot.actuate.metrics.Metric;
-import org.springframework.stereotype.Service;
-
-import java.lang.management.ManagementFactory;
-import java.lang.management.MemoryPoolMXBean;
-import java.util.ArrayList;
-import java.util.Collection;
-
-@Service
-public class HeapPoolMetrics implements PublicMetrics {
-
- private static final String PREFIX = "mempool.";
- private static final String KEY_EDEN = PREFIX + "eden";
- private static final String KEY_SURVIVOR = PREFIX + "survivor";
- private static final String KEY_TENURED = PREFIX + "tenured";
-
- @Override
- public Collection<Metric<?>> metrics() {
- Collection<Metric<?>> result = new ArrayList<>(4);
- for (MemoryPoolMXBean mem : ManagementFactory.getMemoryPoolMXBeans()) {
- String poolName = mem.getName();
- String name = null;
- if (poolName.indexOf("Eden Space") != -1) {
- name = KEY_EDEN;
- } else if (poolName.indexOf("Survivor Space") != -1) {
- name = KEY_SURVIVOR;
- } else if (poolName.indexOf("Tenured Gen") != -1 || poolName.indexOf("Old Gen") != -1) {
- name = KEY_TENURED;
- }
-
- if (name != null) {
- result.add(newMemoryMetric(name, mem.getUsage().getMax()));
- result.add(newMemoryMetric(name + ".init", mem.getUsage().getInit()));
- result.add(newMemoryMetric(name + ".committed", mem.getUsage().getCommitted()));
- result.add(newMemoryMetric(name + ".used", mem.getUsage().getUsed()));
- }
- }
- return result;
- }
-
- private Metric<Long> newMemoryMetric(String name, long bytes) {
- return new Metric<>(name, bytes / 1024);
- }
-}
-```
-
-Please refer [Spring Boot Actuator: Production-ready Features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready-features.html#production-ready) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto.html#howto-actuator) for more information.
-
-## Charts
-
-1. **Response Codes** in requests/s
-
- - 1xx
- - 2xx
- - 3xx
- - 4xx
- - 5xx
- - others
-
-2. **Threads**
-
- - daemon
- - total
-
-3. **GC Time** in milliseconds and **GC Operations** in operations/s
-
- - Copy
- - MarkSweep
- - ...
-
-4. **Heap Memory Usage** in KB
-
- - used
- - committed
-
-## Usage
-
-Edit the `python.d/springboot.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/springboot.conf
-```
-
-This module defines some common charts, and you can add custom charts by change the configurations.
-
-The configuration format is like:
-
-```yaml
-<id>:
- name: '<name>'
- url: '<metrics endpoint>' # ex. http://localhost:8080/metrics
- user: '<username>' # optional
- pass: '<password>' # optional
- defaults:
- [<chart-id>]: true|false
- extras:
- - id: '<chart-id>'
- options:
- title: '***'
- units: '***'
- family: '***'
- context: 'springboot.***'
- charttype: 'stacked' | 'area' | 'line'
- lines:
- - { dimension: 'myapp_ok', name: 'ok', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ok" metrics
- - { dimension: 'myapp_ng', name: 'ng', algorithm: 'absolute', multiplier: 1, divisor: 1} # it shows "myapp.ng" metrics
-```
-
-By default, it creates `response_code`, `threads`, `gc_time`, `gc_ope` abd `heap` charts.
-You can disable the default charts by set `defaults.<chart-id>: false`.
-
-The dimension name of extras charts should replace `.` to `_`.
-
-Please check
-[springboot.conf](https://raw.githubusercontent.com/netdata/netdata/master/collectors/python.d.plugin/springboot/springboot.conf)
-for more examples.
-
-
diff --git a/collectors/python.d.plugin/springboot/springboot.chart.py b/collectors/python.d.plugin/springboot/springboot.chart.py
deleted file mode 100644
index dbe11d6b8..000000000
--- a/collectors/python.d.plugin/springboot/springboot.chart.py
+++ /dev/null
@@ -1,160 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: tomcat netdata python.d module
-# Author: Wing924
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-DEFAULT_ORDER = [
- 'response_code',
- 'threads',
- 'gc_time',
- 'gc_ope',
- 'heap',
-]
-
-DEFAULT_CHARTS = {
- 'response_code': {
- 'options': [None, "Response Codes", "requests/s", "response", "springboot.response_code", "stacked"],
- 'lines': [
- ["resp_other", 'Other', 'incremental'],
- ["resp_1xx", '1xx', 'incremental'],
- ["resp_2xx", '2xx', 'incremental'],
- ["resp_3xx", '3xx', 'incremental'],
- ["resp_4xx", '4xx', 'incremental'],
- ["resp_5xx", '5xx', 'incremental'],
- ]
- },
- 'threads': {
- 'options': [None, "Threads", "current threads", "threads", "springboot.threads", "area"],
- 'lines': [
- ["threads_daemon", 'daemon', 'absolute'],
- ["threads", 'total', 'absolute'],
- ]
- },
- 'gc_time': {
- 'options': [None, "GC Time", "milliseconds", "garbage collection", "springboot.gc_time", "stacked"],
- 'lines': [
- ["gc_copy_time", 'Copy', 'incremental'],
- ["gc_marksweepcompact_time", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_time", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_time", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_time", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_time", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_time", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_time", 'G1 Old Generation', 'incremental'],
- ]
- },
- 'gc_ope': {
- 'options': [None, "GC Operations", "operations/s", "garbage collection", "springboot.gc_ope", "stacked"],
- 'lines': [
- ["gc_copy_count", 'Copy', 'incremental'],
- ["gc_marksweepcompact_count", 'MarkSweepCompact', 'incremental'],
- ["gc_parnew_count", 'ParNew', 'incremental'],
- ["gc_concurrentmarksweep_count", 'ConcurrentMarkSweep', 'incremental'],
- ["gc_ps_scavenge_count", 'PS Scavenge', 'incremental'],
- ["gc_ps_marksweep_count", 'PS MarkSweep', 'incremental'],
- ["gc_g1_young_generation_count", 'G1 Young Generation', 'incremental'],
- ["gc_g1_old_generation_count", 'G1 Old Generation', 'incremental'],
- ]
- },
- 'heap': {
- 'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"],
- 'lines': [
- ["heap_committed", 'committed', "absolute"],
- ["heap_used", 'used', "absolute"],
- ]
- }
-}
-
-
-class ExtraChartError(ValueError):
- pass
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.url = self.configuration.get('url', "http://localhost:8080/metrics")
- self._setup_charts()
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- try:
- data = json.loads(raw_data)
- except ValueError:
- self.debug('%s is not a valid JSON page' % self.url)
- return None
-
- result = {
- 'resp_1xx': 0,
- 'resp_2xx': 0,
- 'resp_3xx': 0,
- 'resp_4xx': 0,
- 'resp_5xx': 0,
- 'resp_other': 0,
- }
-
- for key, value in data.iteritems():
- if 'counter.status.' in key:
- status_type = key[15:16] + 'xx'
- if status_type[0] not in '12345':
- status_type = 'other'
- result['resp_' + status_type] += value
- else:
- result[key.replace('.', '_')] = value
-
- return result or None
-
- def _setup_charts(self):
- self.order = []
- self.definitions = {}
- defaults = self.configuration.get('defaults', {})
-
- for chart in DEFAULT_ORDER:
- if defaults.get(chart, True):
- self.order.append(chart)
- self.definitions[chart] = DEFAULT_CHARTS[chart]
-
- for extra in self.configuration.get('extras', []):
- self._add_extra_chart(extra)
- self.order.append(extra['id'])
-
- def _add_extra_chart(self, chart):
- chart_id = chart.get('id', None) or self.die('id is not defined in extra chart')
- options = chart.get('options', None) or self.die('option is not defined in extra chart: %s' % chart_id)
- lines = chart.get('lines', None) or self.die('lines is not defined in extra chart: %s' % chart_id)
-
- title = options.get('title', None) or self.die('title is missing: %s' % chart_id)
- units = options.get('units', None) or self.die('units is missing: %s' % chart_id)
- family = options.get('family', title)
- context = options.get('context', 'springboot.' + title)
- charttype = options.get('charttype', 'line')
-
- result = {
- 'options': [None, title, units, family, context, charttype],
- 'lines': [],
- }
-
- for line in lines:
- dimension = line.get('dimension', None) or self.die('dimension is missing: %s' % chart_id)
- name = line.get('name', dimension)
- algorithm = line.get('algorithm', 'absolute')
- multiplier = line.get('multiplier', 1)
- divisor = line.get('divisor', 1)
- result['lines'].append([dimension, name, algorithm, multiplier, divisor])
-
- self.definitions[chart_id] = result
-
- @staticmethod
- def die(error_message):
- raise ExtraChartError(error_message)
diff --git a/collectors/python.d.plugin/springboot/springboot.conf b/collectors/python.d.plugin/springboot/springboot.conf
deleted file mode 100644
index 0cb369cd8..000000000
--- a/collectors/python.d.plugin/springboot/springboot.conf
+++ /dev/null
@@ -1,118 +0,0 @@
-# netdata python.d.plugin configuration for springboot
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, this plugin also supports the following:
-#
-# url: 'http://127.0.0.1/metrics' # the URL of the spring boot actuator metrics
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# defaults:
-# [chart_id]: true | false # enables/disables default charts, defaults true.
-# extras: {} # defines extra charts to monitor, please see the example below
-# - id: [chart_id]
-# options: {}
-# lines: []
-#
-# If all defaults is disabled and no extra charts are defined, this module will disable itself, as it has no data to
-# collect.
-#
-# Configuration example
-# ---------------------
-# example:
-# name: 'example'
-# url: 'http://localhost:8080/metrics'
-# defaults:
-# response_code: true
-# threads: true
-# gc_time: true
-# gc_ope: true
-# heap: false
-# extras:
-# - id: 'heap'
-# options: { title: 'Heap Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap', charttype: 'stacked' }
-# lines:
-# - { dimension: 'mem_free', name: 'free'}
-# - { dimension: 'mempool_eden_used', name: 'eden', algorithm: 'absolute', multiplier: 1, divisor: 1}
-# - { dimension: 'mempool_survivor_used', name: 'survivor', algorithm: 'absolute', multiplier: 1, divisor: 1}
-# - { dimension: 'mempool_tenured_used', name: 'tenured', algorithm: 'absolute', multiplier: 1, divisor: 1}
-# - id: 'heap_eden'
-# options: { title: 'Eden Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_eden', charttype: 'area' }
-# lines:
-# - { dimension: 'mempool_eden_used', name: 'used'}
-# - { dimension: 'mempool_eden_committed', name: 'committed'}
-# - id: 'heap_survivor'
-# options: { title: 'Survivor Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_survivor', charttype: 'area' }
-# lines:
-# - { dimension: 'mempool_survivor_used', name: 'used'}
-# - { dimension: 'mempool_survivor_committed', name: 'committed'}
-# - id: 'heap_tenured'
-# options: { title: 'Tenured Memory Usage', units: 'KB', family: 'heap memory', context: 'springboot.heap_tenured', charttype: 'area' }
-# lines:
-# - { dimension: 'mempool_tenured_used', name: 'used'}
-# - { dimension: 'mempool_tenured_committed', name: 'committed'}
-
-
-local:
- name: 'local'
- url: 'http://localhost:8080/metrics'
-
-local_ip:
- name: 'local'
- url: 'http://127.0.0.1:8080/metrics'
diff --git a/collectors/python.d.plugin/squid/README.md b/collectors/python.d.plugin/squid/README.md
index c29b69a19..ac6c83714 100644
--- a/collectors/python.d.plugin/squid/README.md
+++ b/collectors/python.d.plugin/squid/README.md
@@ -1,7 +1,10 @@
<!--
title: "Squid monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/squid/README.md"
sidebar_label: "Squid"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# Squid monitoring with Netdata
@@ -35,7 +38,7 @@ It produces following charts:
## Configuration
Edit the `python.d/squid.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/tomcat/README.md b/collectors/python.d.plugin/tomcat/README.md
index b7525b88a..66ed6d97a 100644
--- a/collectors/python.d.plugin/tomcat/README.md
+++ b/collectors/python.d.plugin/tomcat/README.md
@@ -1,7 +1,10 @@
<!--
title: "Apache Tomcat monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tomcat/README.md"
sidebar_label: "Tomcat"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# Apache Tomcat monitoring with Netdata
@@ -30,7 +33,7 @@ Charts:
## Configuration
Edit the `python.d/tomcat.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/tor/README.md b/collectors/python.d.plugin/tor/README.md
index b57d77c08..c66803766 100644
--- a/collectors/python.d.plugin/tor/README.md
+++ b/collectors/python.d.plugin/tor/README.md
@@ -1,7 +1,10 @@
<!--
title: "Tor monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/tor/README.md"
sidebar_label: "Tor"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Apps"
-->
# Tor monitoring with Netdata
@@ -23,7 +26,7 @@ It produces only one chart:
## Configuration
Edit the `python.d/tor.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/traefik/README.md b/collectors/python.d.plugin/traefik/README.md
index 251cdf2e4..cf30a82a4 100644
--- a/collectors/python.d.plugin/traefik/README.md
+++ b/collectors/python.d.plugin/traefik/README.md
@@ -1,7 +1,10 @@
<!--
title: "Traefik monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/traefik/README.md
-sidebar_label: "Traefik"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/traefik/README.md"
+sidebar_label: "traefik-python.d.plugin"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# Traefik monitoring with Netdata
@@ -10,45 +13,46 @@ Uses the `health` API to provide statistics.
It produces:
-1. **Responses** by statuses
+1. **Responses** by statuses
- - success (1xx, 2xx, 304)
- - error (5xx)
- - redirect (3xx except 304)
- - bad (4xx)
- - other (all other responses)
+ - success (1xx, 2xx, 304)
+ - error (5xx)
+ - redirect (3xx except 304)
+ - bad (4xx)
+ - other (all other responses)
-2. **Responses** by codes
+2. **Responses** by codes
- - 2xx (successful)
- - 5xx (internal server errors)
- - 3xx (redirect)
- - 4xx (bad)
- - 1xx (informational)
- - other (non-standart responses)
+ - 2xx (successful)
+ - 5xx (internal server errors)
+ - 3xx (redirect)
+ - 4xx (bad)
+ - 1xx (informational)
+ - other (non-standard responses)
-3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
+3. **Detailed Response Codes** requests/s (number of responses for each response code family individually)
-4. **Requests**/s
+4. **Requests**/s
- - request statistics
+ - request statistics
-5. **Total response time**
+5. **Total response time**
- - sum of all response time
+ - sum of all response time
-6. **Average response time**
+6. **Average response time**
-7. **Average response time per iteration**
+7. **Average response time per iteration**
-8. **Uptime**
+8. **Uptime**
- - Traefik server uptime
+ - Traefik server uptime
## Configuration
-Edit the `python.d/traefik.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+Edit the `python.d/traefik.conf` configuration file using `edit-config` from the
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically
+at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
@@ -60,11 +64,11 @@ Needs only `url` to server's `health`
Here is an example for local server:
```yaml
-update_every : 1
-priority : 60000
+update_every: 1
+priority: 60000
local:
- url : 'http://localhost:8080/health'
+ url: 'http://localhost:8080/health'
```
Without configuration, module attempts to connect to `http://localhost:8080/health`.
diff --git a/collectors/python.d.plugin/uwsgi/README.md b/collectors/python.d.plugin/uwsgi/README.md
index 58db1a41a..dcc2dc38e 100644
--- a/collectors/python.d.plugin/uwsgi/README.md
+++ b/collectors/python.d.plugin/uwsgi/README.md
@@ -1,7 +1,10 @@
<!--
title: "uWSGI monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/uwsgi/README.md"
sidebar_label: "uWSGI"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# uWSGI monitoring with Netdata
@@ -29,7 +32,7 @@ Following charts are drawn:
## Configuration
Edit the `python.d/uwsgi.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
index 018905f06..ebcc00c51 100644
--- a/collectors/python.d.plugin/varnish/README.md
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -1,7 +1,10 @@
<!--
title: "Varnish Cache monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md"
sidebar_label: "Varnish Cache"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Webapps"
-->
# Varnish Cache monitoring with Netdata
@@ -45,7 +48,7 @@ For every storage (SMF, SMA, or MSE):
## Configuration
Edit the `python.d/varnish.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/w1sensor/README.md b/collectors/python.d.plugin/w1sensor/README.md
index b6d2b2d63..12a14a19a 100644
--- a/collectors/python.d.plugin/w1sensor/README.md
+++ b/collectors/python.d.plugin/w1sensor/README.md
@@ -1,7 +1,10 @@
<!--
title: "1-Wire Sensors monitoring with Netdata"
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/w1sensor/README.md"
sidebar_label: "1-Wire sensors"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Remotes/Devices"
-->
# 1-Wire Sensors monitoring with Netdata
@@ -16,7 +19,7 @@ Charts are created dynamically based on the number of detected sensors.
## Configuration
Edit the `python.d/w1sensor.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
```bash
cd /etc/netdata # Replace this path with your Netdata config directory, if different
diff --git a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
index c4f847bf0..66797ced3 100644
--- a/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
+++ b/collectors/python.d.plugin/w1sensor/w1sensor.chart.py
@@ -15,7 +15,7 @@ update_every = 5
W1_DIR = '/sys/bus/w1/devices/'
# Lines matching the following regular expression contain a temperature value
-RE_TEMP = re.compile(r' t=(\d+)')
+RE_TEMP = re.compile(r' t=(-?\d+)')
ORDER = [
'temp',
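The w1sensor regex fix above widens the capture group to accept a leading minus sign, so sub-zero readings from 1-Wire temperature sensors are no longer dropped. A quick illustrative check (the sample `w1_slave` line is made up for the example):

```python
import re

RE_TEMP_OLD = re.compile(r' t=(\d+)')    # previous pattern: positive values only
RE_TEMP_NEW = re.compile(r' t=(-?\d+)')  # fixed pattern: optional sign

# Example second line of a DS18B20 w1_slave file reporting -1.062 C (millidegrees).
line = '6e 01 4b 46 7f ff 02 10 71 t=-1062'

print(RE_TEMP_OLD.search(line))            # None: the old regex misses the reading
print(RE_TEMP_NEW.search(line).group(1))   # '-1062'
```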
diff --git a/collectors/python.d.plugin/zscores/README.md b/collectors/python.d.plugin/zscores/README.md
index 4f84a6c1f..d89aa6a0f 100644
--- a/collectors/python.d.plugin/zscores/README.md
+++ b/collectors/python.d.plugin/zscores/README.md
@@ -1,14 +1,18 @@
<!--
title: "zscores"
description: "Use statistical anomaly detection to narrow your focus and shorten root cause analysis."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/zscores/README.md
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/zscores/README.md"
+sidebar_label: "zscores"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/Uncategorized"
-->
# Z-Scores - basic anomaly detection for your key metrics and charts
Smoothed, rolling [Z-Scores](https://en.wikipedia.org/wiki/Standard_score) for selected metrics or charts.
-This collector uses the [Netdata rest api](https://learn.netdata.cloud/docs/agent/web/api) to get the `mean` and `stddev`
+This collector uses the [Netdata rest api](https://github.com/netdata/netdata/blob/master/web/api/README.md) to get the `mean` and `stddev`
for each dimension on specified charts over a time range (defined by `train_secs` and `offset_secs`). For each dimension
it will calculate a Z-Score as `z = (x - mean) / stddev` (clipped at `z_clip`). Scores are then smoothed over
time (`z_smooth_n`) and, if `mode: 'per_chart'`, aggregated across dimensions to a smoothed, rolling chart level Z-Score