author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-26 08:15:20 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-08-26 08:15:20 +0000
commit    87d772a7d708fec12f48cd8adc0dedff6e1025da (patch)
tree      1fee344c64cc3f43074a01981e21126c8482a522 /src/collectors/python.d.plugin
parent    Adding upstream version 1.46.3. (diff)
download  netdata-87d772a7d708fec12f48cd8adc0dedff6e1025da.tar.xz
          netdata-87d772a7d708fec12f48cd8adc0dedff6e1025da.zip

Adding upstream version 1.47.0. (upstream/1.47.0)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/collectors/python.d.plugin')
l--------- src/collectors/python.d.plugin/alarms/README.md | 1
-rw-r--r-- src/collectors/python.d.plugin/alarms/alarms.chart.py | 95
-rw-r--r-- src/collectors/python.d.plugin/alarms/alarms.conf | 60
-rw-r--r-- src/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md | 201
-rw-r--r-- src/collectors/python.d.plugin/alarms/metadata.yaml | 177
-rw-r--r-- src/collectors/python.d.plugin/am2320/integrations/am2320.md | 34
-rw-r--r-- src/collectors/python.d.plugin/beanstalk/beanstalk.chart.py | 252
-rw-r--r-- src/collectors/python.d.plugin/beanstalk/beanstalk.conf | 78
-rw-r--r-- src/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md | 219
-rw-r--r-- src/collectors/python.d.plugin/boinc/integrations/boinc.md | 34
-rw-r--r-- src/collectors/python.d.plugin/ceph/integrations/ceph.md | 34
l--------- src/collectors/python.d.plugin/changefinder/README.md | 1
-rw-r--r-- src/collectors/python.d.plugin/changefinder/changefinder.chart.py | 185
-rw-r--r-- src/collectors/python.d.plugin/changefinder/changefinder.conf | 74
-rw-r--r-- src/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md | 217
-rw-r--r-- src/collectors/python.d.plugin/changefinder/metadata.yaml | 212
-rw-r--r-- src/collectors/python.d.plugin/dovecot/dovecot.chart.py | 143
-rw-r--r-- src/collectors/python.d.plugin/dovecot/dovecot.conf | 98
-rw-r--r-- src/collectors/python.d.plugin/dovecot/integrations/dovecot.md | 197
-rw-r--r-- src/collectors/python.d.plugin/dovecot/metadata.yaml | 207
l--------- src/collectors/python.d.plugin/example/README.md | 1
-rw-r--r-- src/collectors/python.d.plugin/example/example.chart.py | 51
-rw-r--r-- src/collectors/python.d.plugin/example/example.conf | 87
-rw-r--r-- src/collectors/python.d.plugin/example/integrations/example_collector.md | 171
-rw-r--r-- src/collectors/python.d.plugin/example/metadata.yaml | 138
-rw-r--r-- src/collectors/python.d.plugin/exim/exim.chart.py | 39
-rw-r--r-- src/collectors/python.d.plugin/exim/exim.conf | 91
-rw-r--r-- src/collectors/python.d.plugin/exim/integrations/exim.md | 181
-rw-r--r-- src/collectors/python.d.plugin/exim/metadata.yaml | 132
-rw-r--r-- src/collectors/python.d.plugin/gearman/gearman.chart.py | 243
-rw-r--r-- src/collectors/python.d.plugin/gearman/gearman.conf | 75
-rw-r--r-- src/collectors/python.d.plugin/gearman/integrations/gearman.md | 210
-rw-r--r-- src/collectors/python.d.plugin/gearman/metadata.yaml | 168
-rw-r--r-- src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md | 34
-rw-r--r-- src/collectors/python.d.plugin/haproxy/metadata.yaml | 2
-rw-r--r-- src/collectors/python.d.plugin/icecast/icecast.chart.py | 94
-rw-r--r-- src/collectors/python.d.plugin/icecast/icecast.conf | 81
-rw-r--r-- src/collectors/python.d.plugin/icecast/integrations/icecast.md | 166
-rw-r--r-- src/collectors/python.d.plugin/icecast/metadata.yaml | 127
-rw-r--r-- src/collectors/python.d.plugin/ipfs/integrations/ipfs.md | 203
-rw-r--r-- src/collectors/python.d.plugin/ipfs/ipfs.chart.py | 149
-rw-r--r-- src/collectors/python.d.plugin/ipfs/ipfs.conf | 82
-rw-r--r-- src/collectors/python.d.plugin/ipfs/metadata.yaml | 172
-rw-r--r-- src/collectors/python.d.plugin/memcached/memcached.chart.py | 197
-rw-r--r-- src/collectors/python.d.plugin/memcached/memcached.conf | 90
-rw-r--r-- src/collectors/python.d.plugin/monit/integrations/monit.md | 214
-rw-r--r-- src/collectors/python.d.plugin/monit/metadata.yaml | 217
-rw-r--r-- src/collectors/python.d.plugin/monit/monit.chart.py | 360
-rw-r--r-- src/collectors/python.d.plugin/monit/monit.conf | 86
l--------- src/collectors/python.d.plugin/nsd/README.md | 1
-rw-r--r-- src/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md | 199
-rw-r--r-- src/collectors/python.d.plugin/nsd/metadata.yaml | 201
-rw-r--r-- src/collectors/python.d.plugin/nsd/nsd.chart.py | 105
-rw-r--r-- src/collectors/python.d.plugin/nsd/nsd.conf | 91
-rw-r--r-- src/collectors/python.d.plugin/nvidia_smi/README.md | 81
-rw-r--r-- src/collectors/python.d.plugin/nvidia_smi/metadata.yaml | 166
-rw-r--r-- src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py | 651
-rw-r--r-- src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf | 68
-rw-r--r-- src/collectors/python.d.plugin/openldap/integrations/openldap.md | 34
-rw-r--r-- src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md | 34
-rw-r--r-- src/collectors/python.d.plugin/pandas/integrations/pandas.md | 34
-rw-r--r-- src/collectors/python.d.plugin/postfix/integrations/postfix.md | 151
-rw-r--r-- src/collectors/python.d.plugin/postfix/metadata.yaml | 124
-rw-r--r-- src/collectors/python.d.plugin/postfix/postfix.chart.py | 52
-rw-r--r-- src/collectors/python.d.plugin/postfix/postfix.conf | 72
-rw-r--r-- src/collectors/python.d.plugin/puppet/integrations/puppet.md | 215
-rw-r--r-- src/collectors/python.d.plugin/puppet/metadata.yaml | 185
-rw-r--r-- src/collectors/python.d.plugin/puppet/puppet.chart.py | 121
-rw-r--r-- src/collectors/python.d.plugin/puppet/puppet.conf | 94
-rw-r--r-- src/collectors/python.d.plugin/python.d.conf | 40
-rw-r--r-- src/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py | 327
-rw-r--r-- src/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md | 190
-rw-r--r-- src/collectors/python.d.plugin/rethinkdbs/metadata.yaml | 188
-rw-r--r-- src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py | 247
-rw-r--r-- src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf | 76
l--------- src/collectors/python.d.plugin/retroshare/README.md | 1
-rw-r--r-- src/collectors/python.d.plugin/retroshare/integrations/retroshare.md | 191
-rw-r--r-- src/collectors/python.d.plugin/retroshare/metadata.yaml | 144
-rw-r--r-- src/collectors/python.d.plugin/retroshare/retroshare.chart.py | 78
-rw-r--r-- src/collectors/python.d.plugin/retroshare/retroshare.conf | 72
l--------- src/collectors/python.d.plugin/riakkv/README.md | 1
-rw-r--r-- src/collectors/python.d.plugin/riakkv/integrations/riakkv.md | 220
-rw-r--r-- src/collectors/python.d.plugin/riakkv/riakkv.chart.py | 334
-rw-r--r-- src/collectors/python.d.plugin/riakkv/riakkv.conf | 68
-rw-r--r-- src/collectors/python.d.plugin/samba/integrations/samba.md | 34
-rw-r--r-- src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md | 34
-rw-r--r-- src/collectors/python.d.plugin/squid/integrations/squid.md | 199
-rw-r--r-- src/collectors/python.d.plugin/squid/squid.chart.py | 123
-rw-r--r-- src/collectors/python.d.plugin/squid/squid.conf | 167
-rw-r--r-- src/collectors/python.d.plugin/tomcat/integrations/tomcat.md | 203
-rw-r--r-- src/collectors/python.d.plugin/tomcat/metadata.yaml | 200
-rw-r--r-- src/collectors/python.d.plugin/tomcat/tomcat.chart.py | 199
-rw-r--r-- src/collectors/python.d.plugin/tomcat/tomcat.conf | 89
-rw-r--r-- src/collectors/python.d.plugin/tor/integrations/tor.md | 197
-rw-r--r-- src/collectors/python.d.plugin/tor/metadata.yaml | 143
-rw-r--r-- src/collectors/python.d.plugin/tor/tor.chart.py | 109
-rw-r--r-- src/collectors/python.d.plugin/tor/tor.conf | 81
-rw-r--r-- src/collectors/python.d.plugin/traefik/metadata.yaml | 2
-rw-r--r-- src/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md | 219
-rw-r--r-- src/collectors/python.d.plugin/uwsgi/metadata.yaml | 201
-rw-r--r-- src/collectors/python.d.plugin/uwsgi/uwsgi.chart.py | 177
-rw-r--r-- src/collectors/python.d.plugin/uwsgi/uwsgi.conf | 92
-rw-r--r-- src/collectors/python.d.plugin/varnish/integrations/varnish.md | 34
-rw-r--r-- src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md | 34
-rw-r--r-- src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md | 34
l--------- src/go/plugin/go.d/modules/beanstalk/README.md (renamed from src/collectors/python.d.plugin/beanstalk/README.md) | 0
-rw-r--r-- src/go/plugin/go.d/modules/beanstalk/metadata.yaml (renamed from src/collectors/python.d.plugin/beanstalk/metadata.yaml) | 228
l--------- src/go/plugin/go.d/modules/dovecot/README.md (renamed from src/collectors/python.d.plugin/dovecot/README.md) | 0
l--------- src/go/plugin/go.d/modules/exim/README.md (renamed from src/collectors/python.d.plugin/exim/README.md) | 0
l--------- src/go/plugin/go.d/modules/gearman/README.md (renamed from src/collectors/python.d.plugin/gearman/README.md) | 0
l--------- src/go/plugin/go.d/modules/icecast/README.md (renamed from src/collectors/python.d.plugin/icecast/README.md) | 0
l--------- src/go/plugin/go.d/modules/ipfs/README.md (renamed from src/collectors/python.d.plugin/ipfs/README.md) | 0
l--------- src/go/plugin/go.d/modules/memcached/README.md (renamed from src/collectors/python.d.plugin/memcached/README.md) | 0
-rw-r--r-- src/go/plugin/go.d/modules/memcached/integrations/memcached.md (renamed from src/collectors/python.d.plugin/memcached/integrations/memcached.md) | 114
-rw-r--r-- src/go/plugin/go.d/modules/memcached/metadata.yaml (renamed from src/collectors/python.d.plugin/memcached/metadata.yaml) | 96
l--------- src/go/plugin/go.d/modules/monit/README.md (renamed from src/collectors/python.d.plugin/monit/README.md) | 0
l--------- src/go/plugin/go.d/modules/postfix/README.md (renamed from src/collectors/python.d.plugin/postfix/README.md) | 0
l--------- src/go/plugin/go.d/modules/puppet/README.md (renamed from src/collectors/python.d.plugin/puppet/README.md) | 0
l--------- src/go/plugin/go.d/modules/rethinkdb/README.md (renamed from src/collectors/python.d.plugin/rethinkdbs/README.md) | 0
-rw-r--r-- src/go/plugin/go.d/modules/riakkv/metadata.yaml (renamed from src/collectors/python.d.plugin/riakkv/metadata.yaml) | 204
l--------- src/go/plugin/go.d/modules/squid/README.md (renamed from src/collectors/python.d.plugin/squid/README.md) | 0
-rw-r--r-- src/go/plugin/go.d/modules/squid/metadata.yaml (renamed from src/collectors/python.d.plugin/squid/metadata.yaml) | 143
l--------- src/go/plugin/go.d/modules/tomcat/README.md (renamed from src/collectors/python.d.plugin/tomcat/README.md) | 0
l--------- src/go/plugin/go.d/modules/tor/README.md (renamed from src/collectors/python.d.plugin/tor/README.md) | 0
l--------- src/go/plugin/go.d/modules/uwsgi/README.md (renamed from src/collectors/python.d.plugin/uwsgi/README.md) | 0
125 files changed, 836 insertions, 13756 deletions
diff --git a/src/collectors/python.d.plugin/alarms/README.md b/src/collectors/python.d.plugin/alarms/README.md
deleted file mode 120000
index 85759ae6c..000000000
--- a/src/collectors/python.d.plugin/alarms/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/netdata_agent_alarms.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/alarms/alarms.chart.py b/src/collectors/python.d.plugin/alarms/alarms.chart.py
deleted file mode 100644
index d19427358..000000000
--- a/src/collectors/python.d.plugin/alarms/alarms.chart.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: alarms netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-update_every = 10
-disabled_by_default = True
-
-
-def charts_template(sm, alarm_status_chart_type='line'):
- order = [
- 'alarms',
- 'values'
- ]
-
- mappings = ', '.join(['{0}={1}'.format(k, v) for k, v in sm.items()])
- charts = {
- 'alarms': {
- 'options': [None, 'Alarms ({0})'.format(mappings), 'status', 'status', 'alarms.status', alarm_status_chart_type],
- 'lines': [],
- 'variables': [
- ['alarms_num'],
- ]
- },
- 'values': {
- 'options': [None, 'Alarm Values', 'value', 'value', 'alarms.value', 'line'],
- 'lines': [],
- }
- }
- return order, charts
-
-
-DEFAULT_STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}
-DEFAULT_URL = 'http://127.0.0.1:19999/api/v1/alarms?all'
-DEFAULT_COLLECT_ALARM_VALUES = False
-DEFAULT_ALARM_STATUS_CHART_TYPE = 'line'
-DEFAULT_ALARM_CONTAINS_WORDS = ''
-DEFAULT_ALARM_EXCLUDES_WORDS = ''
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.sm = self.configuration.get('status_map', DEFAULT_STATUS_MAP)
- self.alarm_status_chart_type = self.configuration.get('alarm_status_chart_type', DEFAULT_ALARM_STATUS_CHART_TYPE)
- self.order, self.definitions = charts_template(self.sm, self.alarm_status_chart_type)
- self.url = self.configuration.get('url', DEFAULT_URL)
- self.collect_alarm_values = bool(self.configuration.get('collect_alarm_values', DEFAULT_COLLECT_ALARM_VALUES))
- self.collected_dims = {'alarms': set(), 'values': set()}
- self.alarm_contains_words = self.configuration.get('alarm_contains_words', DEFAULT_ALARM_CONTAINS_WORDS)
- self.alarm_contains_words_list = [alarm_contains_word.lstrip(' ').rstrip(' ') for alarm_contains_word in self.alarm_contains_words.split(',')]
- self.alarm_excludes_words = self.configuration.get('alarm_excludes_words', DEFAULT_ALARM_EXCLUDES_WORDS)
- self.alarm_excludes_words_list = [alarm_excludes_word.lstrip(' ').rstrip(' ') for alarm_excludes_word in self.alarm_excludes_words.split(',')]
-
- def _get_data(self):
- raw_data = self._get_raw_data()
- if raw_data is None:
- return None
-
- raw_data = loads(raw_data)
- alarms = raw_data.get('alarms', {})
- if self.alarm_contains_words != '':
- alarms = {alarm_name: alarms[alarm_name] for alarm_name in alarms for alarm_contains_word in
- self.alarm_contains_words_list if alarm_contains_word in alarm_name}
- if self.alarm_excludes_words != '':
- alarms = {alarm_name: alarms[alarm_name] for alarm_name in alarms for alarm_excludes_word in
- self.alarm_excludes_words_list if alarm_excludes_word not in alarm_name}
-
- data = {a: self.sm[alarms[a]['status']] for a in alarms if alarms[a]['status'] in self.sm}
- self.update_charts('alarms', data)
- data['alarms_num'] = len(data)
-
- if self.collect_alarm_values:
- data_values = {'{}_value'.format(a): alarms[a]['value'] * 100 for a in alarms if 'value' in alarms[a] and alarms[a]['value'] is not None}
- self.update_charts('values', data_values, divisor=100)
- data.update(data_values)
-
- return data
-
- def update_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- if not self.charts:
- return
-
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
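
The core of the removed module is small enough to restate on its own: poll the agent's `/api/v1/alarms?all` endpoint and translate each alarm's `status` through a configurable map. A minimal sketch of that idea in plain Python, assuming a Netdata agent reachable at `127.0.0.1:19999` (not a drop-in replacement for the collector above):

```python
# Minimal sketch of the status-mapping idea, assuming a Netdata agent
# is reachable at 127.0.0.1:19999 (adjust the URL for your setup).
import json
from urllib.request import urlopen

STATUS_MAP = {'CLEAR': 0, 'WARNING': 1, 'CRITICAL': 2}

def alarm_statuses(url='http://127.0.0.1:19999/api/v1/alarms?all'):
    with urlopen(url) as resp:
        alarms = json.load(resp).get('alarms', {})
    # keep only alarms whose status appears in the map, as the module did
    return {name: STATUS_MAP[a['status']]
            for name, a in alarms.items() if a['status'] in STATUS_MAP}

if __name__ == '__main__':
    print(alarm_statuses())
```
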
diff --git a/src/collectors/python.d.plugin/alarms/alarms.conf b/src/collectors/python.d.plugin/alarms/alarms.conf
deleted file mode 100644
index 06d76c3b3..000000000
--- a/src/collectors/python.d.plugin/alarms/alarms.conf
+++ /dev/null
@@ -1,60 +0,0 @@
-# netdata python.d.plugin configuration for example
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-
-# what url to pull data from
-local:
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- # define how to map alarm status to numbers for the chart
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- # set to true to include a chart with calculated alarm values over time
- collect_alarm_values: false
- # define the type of chart for plotting status over time e.g. 'line' or 'stacked'
- alarm_status_chart_type: 'line'
- # a "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only
- # alarms with "cpu" or "load" in alarm name. Default includes all.
- alarm_contains_words: ''
- # a "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude
- # all alarms with "cpu" or "load" in alarm name. Default excludes None.
- alarm_excludes_words: ''
diff --git a/src/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md b/src/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
deleted file mode 100644
index 57be4f092..000000000
--- a/src/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
+++ /dev/null
@@ -1,201 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/alarms/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/alarms/metadata.yaml"
-sidebar_label: "Netdata Agent alarms"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Netdata Agent alarms
-
-Plugin: python.d.plugin
-Module: alarms
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector creates an 'Alarms' menu with one line plot of `alarms.status`.
-
-
-Alarm status is read from the Netdata agent rest api [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-It discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Netdata Agent alarms instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| alarms.status | a dimension per alarm representing the latest status of the alarm. | status |
-| alarms.values | a dimension per alarm representing the latest collected value of the alarm. | value |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/alarms.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/alarms.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |
-| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2} | yes |
-| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |
-| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |
-| alarm_contains_words | A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all. | | yes |
-| alarm_excludes_words | A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None. | | yes |
-| update_every | Sets the default data collection frequency. | 10 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-jobs:
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
-
-```
-##### Advanced
-
-An advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.
-"ML" job will collect status and values for all alarms with "ml_" in the name. Default job will collect status for all other alarms.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-ML:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: true
- alarm_status_chart_type: 'stacked'
- alarm_contains_words: 'ml_'
-
-Default:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: false
- alarm_status_chart_type: 'stacked'
- alarm_excludes_words: 'ml_'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `alarms` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin alarms debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/alarms/metadata.yaml b/src/collectors/python.d.plugin/alarms/metadata.yaml
deleted file mode 100644
index b6bee7594..000000000
--- a/src/collectors/python.d.plugin/alarms/metadata.yaml
+++ /dev/null
@@ -1,177 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: alarms
- monitored_instance:
- name: Netdata Agent alarms
- link: /src/collectors/python.d.plugin/alarms/README.md
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - alarms
- - netdata
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector creates an 'Alarms' menu with one line plot of `alarms.status`.
- method_description: |
- Alarm status is read from the Netdata agent rest api [`/api/v1/alarms?all`](https://learn.netdata.cloud/api#/alerts/alerts1).
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
- It discovers instances of Netdata running on localhost, and gathers metrics from `http://127.0.0.1:19999/api/v1/alarms?all`. `CLEAR` status is mapped to `0`, `WARNING` to `1` and `CRITICAL` to `2`. Also, by default all alarms produced will be monitored.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/alarms.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: url
- description: Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent.
- default_value: http://127.0.0.1:19999/api/v1/alarms?all
- required: true
- - name: status_map
- description: Mapping of alarm status to integer number that will be the metric value collected.
- default_value: '{"CLEAR": 0, "WARNING": 1, "CRITICAL": 2}'
- required: true
- - name: collect_alarm_values
- description: set to true to include a chart with calculated alarm values over time.
- default_value: false
- required: true
- - name: alarm_status_chart_type
- description: define the type of chart for plotting status over time e.g. 'line' or 'stacked'.
- default_value: "line"
- required: true
- - name: alarm_contains_words
- description: >
- A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all.
- default_value: ""
- required: true
- - name: alarm_excludes_words
- description: >
- A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None.
- default_value: ""
- required: true
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 10
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: Config
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration.
- config: |
- jobs:
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- - name: Advanced
- folding:
- enabled: true
- description: |
- An advanced example configuration with multiple jobs collecting different subsets of alarms for plotting on different charts.
- "ML" job will collect status and values for all alarms with "ml_" in the name. Default job will collect status for all other alarms.
- config: |
- ML:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: true
- alarm_status_chart_type: 'stacked'
- alarm_contains_words: 'ml_'
-
- Default:
- update_every: 5
- url: 'http://127.0.0.1:19999/api/v1/alarms?all'
- status_map:
- CLEAR: 0
- WARNING: 1
- CRITICAL: 2
- collect_alarm_values: false
- alarm_status_chart_type: 'stacked'
- alarm_excludes_words: 'ml_'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: alarms.status
- description: Alarms ({status mapping})
- unit: "status"
- chart_type: line
- dimensions:
- - name: a dimension per alarm representing the latest status of the alarm.
- - name: alarms.values
- description: Alarm Values
- unit: "value"
- chart_type: line
- dimensions:
- - name: a dimension per alarm representing the latest collected value of the alarm.
diff --git a/src/collectors/python.d.plugin/am2320/integrations/am2320.md b/src/collectors/python.d.plugin/am2320/integrations/am2320.md
index f96657624..ea0e505c2 100644
--- a/src/collectors/python.d.plugin/am2320/integrations/am2320.md
+++ b/src/collectors/python.d.plugin/am2320/integrations/am2320.md
@@ -156,6 +156,7 @@ local_sensor:
### Debug Mode
+
To troubleshoot issues with the `am2320` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -178,4 +179,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin am2320 debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `am2320` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep am2320
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep am2320 /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep am2320
+```
+
diff --git a/src/collectors/python.d.plugin/beanstalk/beanstalk.chart.py b/src/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
deleted file mode 100644
index 396543e5a..000000000
--- a/src/collectors/python.d.plugin/beanstalk/beanstalk.chart.py
+++ /dev/null
@@ -1,252 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: beanstalk netdata python.d module
-# Author: ilyam8
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-try:
- import beanstalkc
-
- BEANSTALKC = True
-except ImportError:
- BEANSTALKC = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.loaders import load_yaml
-
-ORDER = [
- 'cpu_usage',
- 'jobs_rate',
- 'connections_rate',
- 'commands_rate',
- 'current_tubes',
- 'current_jobs',
- 'current_connections',
- 'binlog',
- 'uptime',
-]
-
-CHARTS = {
- 'cpu_usage': {
- 'options': [None, 'Cpu Usage', 'cpu time', 'server statistics', 'beanstalk.cpu_usage', 'area'],
- 'lines': [
- ['rusage-utime', 'user', 'incremental'],
- ['rusage-stime', 'system', 'incremental']
- ]
- },
- 'jobs_rate': {
- 'options': [None, 'Jobs Rate', 'jobs/s', 'server statistics', 'beanstalk.jobs_rate', 'line'],
- 'lines': [
- ['total-jobs', 'total', 'incremental'],
- ['job-timeouts', 'timeouts', 'incremental']
- ]
- },
- 'connections_rate': {
- 'options': [None, 'Connections Rate', 'connections/s', 'server statistics', 'beanstalk.connections_rate',
- 'area'],
- 'lines': [
- ['total-connections', 'connections', 'incremental']
- ]
- },
- 'commands_rate': {
- 'options': [None, 'Commands Rate', 'commands/s', 'server statistics', 'beanstalk.commands_rate', 'stacked'],
- 'lines': [
- ['cmd-put', 'put', 'incremental'],
- ['cmd-peek', 'peek', 'incremental'],
- ['cmd-peek-ready', 'peek-ready', 'incremental'],
- ['cmd-peek-delayed', 'peek-delayed', 'incremental'],
- ['cmd-peek-buried', 'peek-buried', 'incremental'],
- ['cmd-reserve', 'reserve', 'incremental'],
- ['cmd-use', 'use', 'incremental'],
- ['cmd-watch', 'watch', 'incremental'],
- ['cmd-ignore', 'ignore', 'incremental'],
- ['cmd-delete', 'delete', 'incremental'],
- ['cmd-release', 'release', 'incremental'],
- ['cmd-bury', 'bury', 'incremental'],
- ['cmd-kick', 'kick', 'incremental'],
- ['cmd-stats', 'stats', 'incremental'],
- ['cmd-stats-job', 'stats-job', 'incremental'],
- ['cmd-stats-tube', 'stats-tube', 'incremental'],
- ['cmd-list-tubes', 'list-tubes', 'incremental'],
- ['cmd-list-tube-used', 'list-tube-used', 'incremental'],
- ['cmd-list-tubes-watched', 'list-tubes-watched', 'incremental'],
- ['cmd-pause-tube', 'pause-tube', 'incremental']
- ]
- },
- 'current_tubes': {
- 'options': [None, 'Current Tubes', 'tubes', 'server statistics', 'beanstalk.current_tubes', 'area'],
- 'lines': [
- ['current-tubes', 'tubes']
- ]
- },
- 'current_jobs': {
- 'options': [None, 'Current Jobs', 'jobs', 'server statistics', 'beanstalk.current_jobs', 'stacked'],
- 'lines': [
- ['current-jobs-urgent', 'urgent'],
- ['current-jobs-ready', 'ready'],
- ['current-jobs-reserved', 'reserved'],
- ['current-jobs-delayed', 'delayed'],
- ['current-jobs-buried', 'buried']
- ]
- },
- 'current_connections': {
- 'options': [None, 'Current Connections', 'connections', 'server statistics',
- 'beanstalk.current_connections', 'line'],
- 'lines': [
- ['current-connections', 'written'],
- ['current-producers', 'producers'],
- ['current-workers', 'workers'],
- ['current-waiting', 'waiting']
- ]
- },
- 'binlog': {
- 'options': [None, 'Binlog', 'records/s', 'server statistics', 'beanstalk.binlog', 'line'],
- 'lines': [
- ['binlog-records-written', 'written', 'incremental'],
- ['binlog-records-migrated', 'migrated', 'incremental']
- ]
- },
- 'uptime': {
- 'options': [None, 'Uptime', 'seconds', 'server statistics', 'beanstalk.uptime', 'line'],
- 'lines': [
- ['uptime'],
- ]
- }
-}
-
-
-def tube_chart_template(name):
- order = [
- '{0}_jobs_rate'.format(name),
- '{0}_jobs'.format(name),
- '{0}_connections'.format(name),
- '{0}_commands'.format(name),
- '{0}_pause'.format(name)
- ]
- family = 'tube {0}'.format(name)
-
- charts = {
- order[0]: {
- 'options': [None, 'Job Rate', 'jobs/s', family, 'beanstalk.jobs_rate', 'area'],
- 'lines': [
- ['_'.join([name, 'total-jobs']), 'jobs', 'incremental']
- ]
- },
- order[1]: {
- 'options': [None, 'Jobs', 'jobs', family, 'beanstalk.jobs', 'stacked'],
- 'lines': [
- ['_'.join([name, 'current-jobs-urgent']), 'urgent'],
- ['_'.join([name, 'current-jobs-ready']), 'ready'],
- ['_'.join([name, 'current-jobs-reserved']), 'reserved'],
- ['_'.join([name, 'current-jobs-delayed']), 'delayed'],
- ['_'.join([name, 'current-jobs-buried']), 'buried']
- ]
- },
- order[2]: {
- 'options': [None, 'Connections', 'connections', family, 'beanstalk.connections', 'stacked'],
- 'lines': [
- ['_'.join([name, 'current-using']), 'using'],
- ['_'.join([name, 'current-waiting']), 'waiting'],
- ['_'.join([name, 'current-watching']), 'watching']
- ]
- },
- order[3]: {
- 'options': [None, 'Commands', 'commands/s', family, 'beanstalk.commands', 'stacked'],
- 'lines': [
- ['_'.join([name, 'cmd-delete']), 'deletes', 'incremental'],
- ['_'.join([name, 'cmd-pause-tube']), 'pauses', 'incremental']
- ]
- },
- order[4]: {
- 'options': [None, 'Pause', 'seconds', family, 'beanstalk.pause', 'stacked'],
- 'lines': [
- ['_'.join([name, 'pause']), 'since'],
- ['_'.join([name, 'pause-time-left']), 'left']
- ]
- }
- }
-
- return order, charts
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.configuration = configuration
- self.order = list(ORDER)
- self.definitions = dict(CHARTS)
- self.conn = None
- self.alive = True
-
- def check(self):
- if not BEANSTALKC:
- self.error("'beanstalkc' module is needed to use beanstalk.chart.py")
- return False
-
- self.conn = self.connect()
-
- return True if self.conn else False
-
- def get_data(self):
- """
- :return: dict
- """
- if not self.is_alive():
- return None
-
- active_charts = self.charts.active_charts()
- data = dict()
-
- try:
- data.update(self.conn.stats())
-
- for tube in self.conn.tubes():
- stats = self.conn.stats_tube(tube)
-
- if tube + '_jobs_rate' not in active_charts:
- self.create_new_tube_charts(tube)
-
- for stat in stats:
- data['_'.join([tube, stat])] = stats[stat]
-
- except beanstalkc.SocketError:
- self.alive = False
- return None
-
- return data or None
-
- def create_new_tube_charts(self, tube):
- order, charts = tube_chart_template(tube)
-
- for chart_name in order:
- params = [chart_name] + charts[chart_name]['options']
- dimensions = charts[chart_name]['lines']
-
- new_chart = self.charts.add_chart(params)
- for dimension in dimensions:
- new_chart.add_dimension(dimension)
-
- def connect(self):
- host = self.configuration.get('host', '127.0.0.1')
- port = self.configuration.get('port', 11300)
- timeout = self.configuration.get('timeout', 1)
- try:
- return beanstalkc.Connection(host=host,
- port=port,
- connect_timeout=timeout,
- parse_yaml=load_yaml)
- except beanstalkc.SocketError as error:
- self.error('Connection to {0}:{1} failed: {2}'.format(host, port, error))
- return None
-
- def reconnect(self):
- try:
- self.conn.reconnect()
- self.alive = True
- return True
- except beanstalkc.SocketError:
- return False
-
- def is_alive(self):
- if not self.alive:
- return self.reconnect()
- return True
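
The most reusable piece of the service above is its liveness handling: a failed `stats()` call marks the connection dead, and the next poll makes a single `reconnect()` attempt before collecting again. A minimal sketch of that pattern, assuming the `beanstalkc` module and a beanstalkd on the default `127.0.0.1:11300`:

```python
# Sketch of the reconnect-on-failure pattern from the removed Service class.
# Assumes the beanstalkc module and a beanstalkd at 127.0.0.1:11300.
import beanstalkc

class StatsPoller:
    def __init__(self, host='127.0.0.1', port=11300):
        self.conn = beanstalkc.Connection(host=host, port=port, connect_timeout=1)
        self.alive = True

    def poll(self):
        if not self.alive:
            try:
                self.conn.reconnect()  # one reconnect attempt per poll
                self.alive = True
            except beanstalkc.SocketError:
                return None            # still down; report no data
        try:
            return self.conn.stats()   # server-wide statistics dict
        except beanstalkc.SocketError:
            self.alive = False         # defer reconnect to the next poll
            return None

if __name__ == '__main__':
    print(StatsPoller().poll())
```
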
diff --git a/src/collectors/python.d.plugin/beanstalk/beanstalk.conf b/src/collectors/python.d.plugin/beanstalk/beanstalk.conf
deleted file mode 100644
index 6d9773a19..000000000
--- a/src/collectors/python.d.plugin/beanstalk/beanstalk.conf
+++ /dev/null
@@ -1,78 +0,0 @@
-# netdata python.d.plugin configuration for beanstalk
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# chart_cleanup sets the default chart cleanup interval in iterations.
-# A chart is marked as obsolete if it has not been updated
-# 'chart_cleanup' iterations in a row.
-# When a plugin sends the obsolete flag, the charts are not deleted
-# from netdata immediately.
-# They will be hidden immediately (not offered to dashboard viewer,
-# streamed upstream and archived to external databases) and deleted one hour
-# later (configurable from netdata.conf).
-# chart_cleanup: 10
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-# chart_cleanup: 10 # the JOB's chart cleanup interval in iterations
-#
-# Additionally to the above, beanstalk also supports the following:
-#
-# host: 'host' # Server ip address or hostname. Default: 127.0.0.1
-# port: port # Beanstalkd port. Default:
-#
-# ----------------------------------------------------------------------
diff --git a/src/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md b/src/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
deleted file mode 100644
index 841444354..000000000
--- a/src/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
+++ /dev/null
@@ -1,219 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/beanstalk/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/beanstalk/metadata.yaml"
-sidebar_label: "Beanstalk"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Message Brokers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Beanstalk
-
-
-<img src="https://netdata.cloud/img/beanstalk.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: beanstalk
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management.
-
-The collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is given, module will attempt to connect to beanstalkd on 127.0.0.1:11300 address.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Beanstalk instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| beanstalk.cpu_usage | user, system | cpu time |
-| beanstalk.jobs_rate | total, timeouts | jobs/s |
-| beanstalk.connections_rate | connections | connections/s |
-| beanstalk.commands_rate | put, peek, peek-ready, peek-delayed, peek-buried, reserve, use, watch, ignore, delete, bury, kick, stats, stats-job, stats-tube, list-tubes, list-tube-used, list-tubes-watched, pause-tube | commands/s |
-| beanstalk.current_tubes | tubes | tubes |
-| beanstalk.current_jobs | urgent, ready, reserved, delayed, buried | jobs |
-| beanstalk.current_connections | written, producers, workers, waiting | connections |
-| beanstalk.binlog | written, migrated | records/s |
-| beanstalk.uptime | uptime | seconds |
-
-### Per tube
-
-Metrics related to Beanstalk tubes. Each tube produces its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| beanstalk.jobs_rate | jobs | jobs/s |
-| beanstalk.jobs | urgent, ready, reserved, delayed, buried | jobs |
-| beanstalk.connections | using, waiting, watching | connections |
-| beanstalk.commands | deletes, pauses | commands/s |
-| beanstalk.pause | since, left | seconds |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ beanstalk_server_buried_jobs ](https://github.com/netdata/netdata/blob/master/src/health/health.d/beanstalkd.conf) | beanstalk.current_jobs | number of buried jobs across all tubes. You need to manually kick them so they can be processed. Presence of buried jobs in a tube does not affect new jobs. |
-
-
-## Setup
-
-### Prerequisites
-
-#### beanstalkc python module
-
-The collector requires the `beanstalkc` python module to be installed.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/beanstalk.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/beanstalk.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |
-| port | Port to the IP or URL to a beanstalk service. | 11300 | no |
-
-</details>
-
-#### Examples
-
-##### Remote beanstalk server
-
-A basic remote beanstalk server
-
-```yaml
-remote:
- name: 'beanstalk'
- host: '1.2.3.4'
- port: 11300
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local_beanstalk'
- host: '127.0.0.1'
- port: 11300
-
-remote_job:
- name: 'remote_beanstalk'
- host: '192.0.2.1'
- port: 113000
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `beanstalk` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin beanstalk debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/boinc/integrations/boinc.md b/src/collectors/python.d.plugin/boinc/integrations/boinc.md
index 2e5ff5c4f..d5fcac215 100644
--- a/src/collectors/python.d.plugin/boinc/integrations/boinc.md
+++ b/src/collectors/python.d.plugin/boinc/integrations/boinc.md
@@ -179,6 +179,7 @@ remote_job:
### Debug Mode
+
To troubleshoot issues with the `boinc` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -201,4 +202,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin boinc debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `boinc` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep boinc
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep boinc /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep boinc
+```
+
diff --git a/src/collectors/python.d.plugin/ceph/integrations/ceph.md b/src/collectors/python.d.plugin/ceph/integrations/ceph.md
index 2b49a331d..d2584a4d0 100644
--- a/src/collectors/python.d.plugin/ceph/integrations/ceph.md
+++ b/src/collectors/python.d.plugin/ceph/integrations/ceph.md
@@ -169,6 +169,7 @@ local:
### Debug Mode
+
To troubleshoot issues with the `ceph` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -191,4 +192,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin ceph debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `ceph` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep ceph
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for collector's name:
+
+```bash
+grep ceph /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep ceph
+```
+
diff --git a/src/collectors/python.d.plugin/changefinder/README.md b/src/collectors/python.d.plugin/changefinder/README.md
deleted file mode 120000
index 0ca704eb1..000000000
--- a/src/collectors/python.d.plugin/changefinder/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/python.d_changefinder.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/changefinder/changefinder.chart.py b/src/collectors/python.d.plugin/changefinder/changefinder.chart.py
deleted file mode 100644
index 2a69cd9f5..000000000
--- a/src/collectors/python.d.plugin/changefinder/changefinder.chart.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: changefinder netdata python.d module
-# Author: andrewm4894
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from json import loads
-import re
-
-from bases.FrameworkServices.UrlService import UrlService
-
-import numpy as np
-import changefinder
-from scipy.stats import percentileofscore
-
-update_every = 5
-disabled_by_default = True
-
-ORDER = [
- 'scores',
- 'flags'
-]
-
-CHARTS = {
- 'scores': {
- 'options': [None, 'ChangeFinder', 'score', 'Scores', 'changefinder.scores', 'line'],
- 'lines': []
- },
- 'flags': {
- 'options': [None, 'ChangeFinder', 'flag', 'Flags', 'changefinder.flags', 'stacked'],
- 'lines': []
- }
-}
-
-DEFAULT_PROTOCOL = 'http'
-DEFAULT_HOST = '127.0.0.1:19999'
-DEFAULT_CHARTS_REGEX = 'system.*'
-DEFAULT_MODE = 'per_chart'
-DEFAULT_CF_R = 0.5
-DEFAULT_CF_ORDER = 1
-DEFAULT_CF_SMOOTH = 15
-DEFAULT_CF_DIFF = False
-DEFAULT_CF_THRESHOLD = 99
-DEFAULT_N_SCORE_SAMPLES = 14400
-DEFAULT_SHOW_SCORES = False
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.protocol = self.configuration.get('protocol', DEFAULT_PROTOCOL)
- self.host = self.configuration.get('host', DEFAULT_HOST)
- self.url = '{}://{}/api/v1/allmetrics?format=json'.format(self.protocol, self.host)
- self.charts_regex = re.compile(self.configuration.get('charts_regex', DEFAULT_CHARTS_REGEX))
- self.charts_to_exclude = self.configuration.get('charts_to_exclude', '').split(',')
- self.mode = self.configuration.get('mode', DEFAULT_MODE)
- self.n_score_samples = int(self.configuration.get('n_score_samples', DEFAULT_N_SCORE_SAMPLES))
- self.show_scores = int(self.configuration.get('show_scores', DEFAULT_SHOW_SCORES))
- self.cf_r = float(self.configuration.get('cf_r', DEFAULT_CF_R))
- self.cf_order = int(self.configuration.get('cf_order', DEFAULT_CF_ORDER))
- self.cf_smooth = int(self.configuration.get('cf_smooth', DEFAULT_CF_SMOOTH))
- self.cf_diff = bool(self.configuration.get('cf_diff', DEFAULT_CF_DIFF))
- self.cf_threshold = float(self.configuration.get('cf_threshold', DEFAULT_CF_THRESHOLD))
- self.collected_dims = {'scores': set(), 'flags': set()}
- self.models = {}
- self.x_latest = {}
- self.scores_latest = {}
- self.scores_samples = {}
-
- def get_score(self, x, model):
- """Update the score for the model based on most recent data, flag if it's percentile passes self.cf_threshold.
- """
-
- # get score
- if model not in self.models:
- # initialise empty model if needed
- self.models[model] = changefinder.ChangeFinder(r=self.cf_r, order=self.cf_order, smooth=self.cf_smooth)
- # if the update for this step fails then just fall back to the last known score
- try:
- score = self.models[model].update(x)
- self.scores_latest[model] = score
- except Exception as _:
- score = self.scores_latest.get(model, 0)
- score = 0 if np.isnan(score) else score
-
- # update sample scores used to calculate percentiles
- if model in self.scores_samples:
- self.scores_samples[model].append(score)
- else:
- self.scores_samples[model] = [score]
- self.scores_samples[model] = self.scores_samples[model][-self.n_score_samples:]
-
- # convert score to percentile
- score = percentileofscore(self.scores_samples[model], score)
-
- # flag based on score percentile
- flag = 1 if score >= self.cf_threshold else 0
-
- return score, flag
-
- def validate_charts(self, chart, data, algorithm='absolute', multiplier=1, divisor=1):
- """If dimension not in chart then add it.
- """
- if not self.charts:
- return
-
- for dim in data:
- if dim not in self.collected_dims[chart]:
- self.collected_dims[chart].add(dim)
- self.charts[chart].add_dimension([dim, dim, algorithm, multiplier, divisor])
-
- for dim in list(self.collected_dims[chart]):
- if dim not in data:
- self.collected_dims[chart].remove(dim)
- self.charts[chart].del_dimension(dim, hide=False)
-
- def diff(self, x, model):
- """Take difference of data.
- """
- x_diff = x - self.x_latest.get(model, 0)
- self.x_latest[model] = x
- x = x_diff
- return x
-
- def _get_data(self):
-
- # pull data from self.url
- raw_data = self._get_raw_data()
- if raw_data is None:
- return None
-
- raw_data = loads(raw_data)
-
- # filter to just the data for the charts specified
- charts_in_scope = list(filter(self.charts_regex.match, raw_data.keys()))
- charts_in_scope = [c for c in charts_in_scope if c not in self.charts_to_exclude]
-
- data_score = {}
- data_flag = {}
-
- # process each chart
- for chart in charts_in_scope:
-
- if self.mode == 'per_chart':
-
- # average dims on chart and run changefinder on that average
- x = [raw_data[chart]['dimensions'][dim]['value'] for dim in raw_data[chart]['dimensions']]
- x = [x for x in x if x is not None]
-
- if len(x) > 0:
-
- x = sum(x) / len(x)
- x = self.diff(x, chart) if self.cf_diff else x
-
- score, flag = self.get_score(x, chart)
- if self.show_scores:
- data_score['{}_score'.format(chart)] = score * 100
- data_flag[chart] = flag
-
- else:
-
- # run changefinder on each individual dim
- for dim in raw_data[chart]['dimensions']:
-
- chart_dim = '{}|{}'.format(chart, dim)
-
- x = raw_data[chart]['dimensions'][dim]['value']
- x = x if x else 0
- x = self.diff(x, chart_dim) if self.cf_diff else x
-
- score, flag = self.get_score(x, chart_dim)
- if self.show_scores:
- data_score['{}_score'.format(chart_dim)] = score * 100
- data_flag[chart_dim] = flag
-
- self.validate_charts('flags', data_flag)
-
- if self.show_scores and len(data_score) > 0:
- data_score['average_score'] = sum(data_score.values()) / len(data_score)
- self.validate_charts('scores', data_score, divisor=100)
-
- data = {**data_score, **data_flag}
-
- return data
diff --git a/src/collectors/python.d.plugin/changefinder/changefinder.conf b/src/collectors/python.d.plugin/changefinder/changefinder.conf
deleted file mode 100644
index 56a681f1e..000000000
--- a/src/collectors/python.d.plugin/changefinder/changefinder.conf
+++ /dev/null
@@ -1,74 +0,0 @@
-# netdata python.d.plugin configuration for changefinder
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 5
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-
-local:
-
- # A friendly name for this job.
- name: 'local'
-
- # What host to pull data from.
- host: '127.0.0.1:19999'
-
- # What charts to pull data for - A regex like 'system\..*|' or 'system\..*|apps.cpu|apps.mem' etc.
- charts_regex: 'system\..*'
-
- # Charts to exclude, useful if you would like to exclude some specific charts.
- # Note: should be a ',' separated string like 'chart.name,chart.name'.
- charts_to_exclude: ''
-
- # Get ChangeFinder scores 'per_dim' or 'per_chart'.
- mode: 'per_chart'
-
- # Default parameters that can be passed to the changefinder library.
- cf_r: 0.5
- cf_order: 1
- cf_smooth: 15
-
- # The percentile above which scores will be flagged.
- cf_threshold: 99
-
- # The number of recent scores to use when calculating the percentile of the changefinder score.
- n_score_samples: 14400
-
- # Set to true if you also want to chart the percentile scores in addition to the flags.
- # Mainly useful for debugging or if you want to dive deeper into how the scores are evolving over time.
- show_scores: false
diff --git a/src/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md b/src/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
deleted file mode 100644
index fe370baac..000000000
--- a/src/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
+++ /dev/null
@@ -1,217 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/changefinder/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/changefinder/metadata.yaml"
-sidebar_label: "python.d changefinder"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# python.d changefinder
-
-Plugin: python.d.plugin
-Module: changefinder
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to
-perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)
-on your Netdata charts and/or dimensions.
-
-
-Rather than just _collecting_ data, this collector also performs some computation on the data it collects, returning a changepoint score for each chart or dimension you configure it to work on. This is an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm, so there is no batch step to train the model; instead, it evolves over time as more data arrives. That makes this particular algorithm quite cheap to compute at each step of data collection (see the notes section below for more details), and it should scale fairly well to lots of charts or hosts (if running on a parent node, for example).
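-
-As a rough illustration of the scoring logic described above, a minimal sketch (not the collector itself) might look like the following. It assumes the `changefinder`, `numpy` and `scipy` packages listed under Prerequisites are installed:
-
-```python
-import changefinder
-import numpy as np
-from scipy.stats import percentileofscore
-
-cf = changefinder.ChangeFinder(r=0.5, order=1, smooth=15)  # the collector's default parameters
-recent_scores = []       # rolling window of raw scores
-n_score_samples = 14400  # how many recent scores to keep
-cf_threshold = 99        # percentile above which a changepoint is flagged
-
-for x in np.random.normal(size=1000):   # stand-in for values collected from a chart
-    score = cf.update(x)                # raw ChangeFinder score for this step
-    recent_scores = (recent_scores + [score])[-n_score_samples:]
-    percentile = percentileofscore(recent_scores, score)
-    flag = 1 if percentile >= cf_threshold else 0
-```
-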
-### Notes
-
-- It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its
- typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
- this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw
- score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
- already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
- should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
- approaches which need some initial window of time before they can be useful.
-- As this collector does most of the work in Python itself, you may want to try it out first on a test or development
- system to get a sense of its performance characteristics on a node similar to where you would like to use it.
-- On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any other work, some
- of the typical performance characteristics we saw from running this collector (with defaults) were:
- - A runtime (`netdata.runtime_changefinder`) of ~30ms.
- - Typically ~1% additional CPU usage.
- - About 85 MB of RAM (`apps.mem`) continually used by the `python.d.plugin` under default configuration.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default this collector will work over all `system.*` charts.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per python.d changefinder instance
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| changefinder.scores | a dimension per chart | score |
-| changefinder.flags | a dimension per chart | flag |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Python Requirements
-
-This collector will only work with Python 3 and requires the packages below to be installed.
-
-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# install required packages for the netdata user
-pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4
-```
-
-**Note**: if you need to tell Netdata to use Python 3, you can set the below option in the `[plugin:python.d]` section
-of your `netdata.conf` file.
-
-```yaml
-[ plugin:python.d ]
- # update every = 1
- command options = -ppython3
-```
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/changefinder.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/changefinder.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| charts_regex | what charts to pull data for - A regex like `system\..*\|` or `system\..*\|apps.cpu\|apps.mem` etc. | system\..* | yes |
-| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |
-| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |
-| cf_r | default parameters that can be passed to the changefinder library. | 0.5 | no |
-| cf_order | default parameters that can be passed to the changefinder library. | 1 | no |
-| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | no |
-| cf_threshold | the percentile above which scores will be flagged. | 99 | no |
-| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |
-| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper into how the scores are evolving over time) | no | no |
-
-</details>
-
-#### Examples
-
-##### Default
-
-Default configuration.
-
-```yaml
-local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: ''
- mode: 'per_chart'
- cf_r: 0.5
- cf_order: 1
- cf_smooth: 15
- cf_threshold: 99
- n_score_samples: 14400
- show_scores: false
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `changefinder` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin changefinder debug trace
- ```
-
-### Debug Mode
-
-If you would like to log in as the `netdata` user and run the collector in debug mode to see more detail:
-
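-```bash
-# become netdata user
-sudo su -s /bin/bash netdata
-# run collector in debug using `nolock` option if netdata is already running the collector itself.
-/usr/libexec/netdata/plugins.d/python.d.plugin changefinder debug trace nolock
-```
-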
-### Log Messages
-
-To see any relevant log messages you can use a command like the ones below.
-
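-```bash
-grep 'changefinder' /var/log/netdata/error.log
-grep 'changefinder' /var/log/netdata/collector.log
-```
-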
diff --git a/src/collectors/python.d.plugin/changefinder/metadata.yaml b/src/collectors/python.d.plugin/changefinder/metadata.yaml
deleted file mode 100644
index 170d9146a..000000000
--- a/src/collectors/python.d.plugin/changefinder/metadata.yaml
+++ /dev/null
@@ -1,212 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: changefinder
- monitored_instance:
- name: python.d changefinder
- link: ""
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - change detection
- - anomaly detection
- - machine learning
- - ml
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector uses the Python [changefinder](https://github.com/shunsukeaihara/changefinder) library to
- perform [online](https://en.wikipedia.org/wiki/Online_machine_learning) [changepoint detection](https://en.wikipedia.org/wiki/Change_detection)
- on your Netdata charts and/or dimensions.
- method_description: >
- Rather than just _collecting_ data, this collector also performs some computation on the data it collects, returning a
- changepoint score for each chart or dimension you configure it to work on. This is
- an [online](https://en.wikipedia.org/wiki/Online_machine_learning) machine learning algorithm, so there is no batch step
- to train the model; instead, it evolves over time as more data arrives. That makes this particular algorithm quite cheap
- to compute at each step of data collection (see the notes section below for more details), and it should scale fairly
- well to lots of charts or hosts (if running on a parent node, for example).
-
- ### Notes
- - It may take an hour or two (depending on your choice of `n_score_samples`) for the collector to 'settle' into its
- typical behaviour in terms of the trained models and scores you will see in the normal running of your node. Mainly
- this is because it can take a while to build up a proper distribution of previous scores in order to convert the raw
- score returned by the ChangeFinder algorithm into a percentile based on the most recent `n_score_samples` that have
- already been produced. So when you first turn the collector on, it will have a lot of flags in the beginning and then
- should 'settle down' once it has built up enough history. This is a typical characteristic of online machine learning
- approaches which need some initial window of time before they can be useful.
- - As this collector does most of the work in Python itself, you may want to try it out first on a test or development
- system to get a sense of its performance characteristics on a node similar to where you would like to use it.
- - On a development n1-standard-2 (2 vCPUs, 7.5 GB memory) VM running Ubuntu 18.04 LTS and not doing any other work, some
- of the typical performance characteristics we saw from running this collector (with defaults) were:
- - A runtime (`netdata.runtime_changefinder`) of ~30ms.
- - Typically ~1% additional CPU usage.
- - About 85 MB of RAM (`apps.mem`) continually used by the `python.d.plugin` under default configuration.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "By default this collector will work over all `system.*` charts."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Python Requirements
- description: |
- This collector will only work with Python 3 and requires the packages below to be installed.
-
- ```bash
- # become netdata user
- sudo su -s /bin/bash netdata
- # install required packages for the netdata user
- pip3 install --user numpy==1.19.5 changefinder==0.03 scipy==1.5.4
- ```
-
- **Note**: if you need to tell Netdata to use Python 3, you can set the below option in the `[plugin:python.d]` section
- of your `netdata.conf` file.
-
- ```yaml
- [ plugin:python.d ]
- # update every = 1
- command options = -ppython3
- ```
- configuration:
- file:
- name: python.d/changefinder.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: charts_regex
- description: what charts to pull data for - A regex like `system\..*|` or `system\..*|apps.cpu|apps.mem` etc.
- default_value: "system\\..*"
- required: true
- - name: charts_to_exclude
- description: |
- charts to exclude, useful if you would like to exclude some specific charts.
- note: should be a ',' separated string like 'chart.name,chart.name'.
- default_value: ""
- required: false
- - name: mode
- description: get ChangeFinder scores 'per_dim' or 'per_chart'.
- default_value: "per_chart"
- required: true
- - name: cf_r
- description: default parameters that can be passed to the changefinder library.
- default_value: 0.5
- required: false
- - name: cf_order
- description: default parameters that can be passed to the changefinder library.
- default_value: 1
- required: false
- - name: cf_smooth
- description: default parameters that can be passed to the changefinder library.
- default_value: 15
- required: false
- - name: cf_threshold
- description: the percentile above which scores will be flagged.
- default_value: 99
- required: false
- - name: n_score_samples
- description: the number of recent scores to use when calculating the percentile of the changefinder score.
- default_value: 14400
- required: false
- - name: show_scores
- description: |
- set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper into how the scores are evolving over time)
- default_value: false
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Default
- description: Default configuration.
- folding:
- enabled: false
- config: |
- local:
- name: 'local'
- host: '127.0.0.1:19999'
- charts_regex: 'system\..*'
- charts_to_exclude: ''
- mode: 'per_chart'
- cf_r: 0.5
- cf_order: 1
- cf_smooth: 15
- cf_threshold: 99
- n_score_samples: 14400
- show_scores: false
- troubleshooting:
- problems:
- list:
- - name: "Debug Mode"
- description: |
- If you would like to log in as the `netdata` user and run the collector in debug mode to see more detail:
-
- ```bash
- # become netdata user
- sudo su -s /bin/bash netdata
- # run collector in debug using `nolock` option if netdata is already running the collector itself.
- /usr/libexec/netdata/plugins.d/python.d.plugin changefinder debug trace nolock
- ```
- - name: "Log Messages"
- description: |
- To see any relevant log messages you can use a command like the ones below.
-
- ```bash
- grep 'changefinder' /var/log/netdata/error.log
- grep 'changefinder' /var/log/netdata/collector.log
- ```
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: changefinder.scores
- description: ChangeFinder
- unit: "score"
- chart_type: line
- dimensions:
- - name: a dimension per chart
- - name: changefinder.flags
- description: ChangeFinder
- unit: "flag"
- chart_type: stacked
- dimensions:
- - name: a dimension per chart
diff --git a/src/collectors/python.d.plugin/dovecot/dovecot.chart.py b/src/collectors/python.d.plugin/dovecot/dovecot.chart.py
deleted file mode 100644
index dfaef28b5..000000000
--- a/src/collectors/python.d.plugin/dovecot/dovecot.chart.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: dovecot netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.SocketService import SocketService
-
-UNIX_SOCKET = '/var/run/dovecot/stats'
-
-ORDER = [
- 'sessions',
- 'logins',
- 'commands',
- 'faults',
- 'context_switches',
- 'io',
- 'net',
- 'syscalls',
- 'lookup',
- 'cache',
- 'auth',
- 'auth_cache'
-]
-
-CHARTS = {
- 'sessions': {
- 'options': [None, 'Dovecot Active Sessions', 'number', 'sessions', 'dovecot.sessions', 'line'],
- 'lines': [
- ['num_connected_sessions', 'active sessions', 'absolute']
- ]
- },
- 'logins': {
- 'options': [None, 'Dovecot Logins', 'number', 'logins', 'dovecot.logins', 'line'],
- 'lines': [
- ['num_logins', 'logins', 'absolute']
- ]
- },
- 'commands': {
- 'options': [None, 'Dovecot Commands', 'commands', 'commands', 'dovecot.commands', 'line'],
- 'lines': [
- ['num_cmds', 'commands', 'absolute']
- ]
- },
- 'faults': {
- 'options': [None, 'Dovecot Page Faults', 'faults', 'page faults', 'dovecot.faults', 'line'],
- 'lines': [
- ['min_faults', 'minor', 'absolute'],
- ['maj_faults', 'major', 'absolute']
- ]
- },
- 'context_switches': {
- 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches',
- 'line'],
- 'lines': [
- ['vol_cs', 'voluntary', 'absolute'],
- ['invol_cs', 'involuntary', 'absolute']
- ]
- },
- 'io': {
- 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
- 'lines': [
- ['disk_input', 'read', 'incremental', 1, 1024],
- ['disk_output', 'write', 'incremental', -1, 1024]
- ]
- },
- 'net': {
- 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
- 'lines': [
- ['read_bytes', 'read', 'incremental', 8, 1000],
- ['write_bytes', 'write', 'incremental', -8, 1000]
- ]
- },
- 'syscalls': {
- 'options': [None, 'Dovecot Number of SysCalls', 'syscalls/s', 'system', 'dovecot.syscalls', 'line'],
- 'lines': [
- ['read_count', 'read', 'incremental'],
- ['write_count', 'write', 'incremental']
- ]
- },
- 'lookup': {
- 'options': [None, 'Dovecot Lookups', 'number/s', 'lookups', 'dovecot.lookup', 'stacked'],
- 'lines': [
- ['mail_lookup_path', 'path', 'incremental'],
- ['mail_lookup_attr', 'attr', 'incremental']
- ]
- },
- 'cache': {
- 'options': [None, 'Dovecot Cache Hits', 'hits/s', 'cache', 'dovecot.cache', 'line'],
- 'lines': [
- ['mail_cache_hits', 'hits', 'incremental']
- ]
- },
- 'auth': {
- 'options': [None, 'Dovecot Authentications', 'attempts', 'logins', 'dovecot.auth', 'stacked'],
- 'lines': [
- ['auth_successes', 'ok', 'absolute'],
- ['auth_failures', 'failed', 'absolute']
- ]
- },
- 'auth_cache': {
- 'options': [None, 'Dovecot Authentication Cache', 'number', 'cache', 'dovecot.auth_cache', 'stacked'],
- 'lines': [
- ['auth_cache_hits', 'hit', 'absolute'],
- ['auth_cache_misses', 'miss', 'absolute']
- ]
- }
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.host = None # localhost
- self.port = None # 24242
- self.unix_socket = UNIX_SOCKET
- self.request = 'EXPORT\tglobal\r\n'
-
- def _get_data(self):
- """
- Format data received from socket
- :return: dict
- """
- try:
- raw = self._get_raw_data()
- except (ValueError, AttributeError):
- return None
-
- if raw is None:
- self.debug('dovecot returned no data')
- return None
-
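- # the reply is two tab-separated rows: field names first, then values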
- data = raw.split('\n')[:2]
- desc = data[0].split('\t')
- vals = data[1].split('\t')
- ret = dict()
- for i, _ in enumerate(desc):
- try:
- ret[str(desc[i])] = int(vals[i])
- except ValueError:
- continue
- return ret or None
diff --git a/src/collectors/python.d.plugin/dovecot/dovecot.conf b/src/collectors/python.d.plugin/dovecot/dovecot.conf
deleted file mode 100644
index 451dbc9ac..000000000
--- a/src/collectors/python.d.plugin/dovecot/dovecot.conf
+++ /dev/null
@@ -1,98 +0,0 @@
-# netdata python.d.plugin configuration for dovecot
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, dovecot also supports the following:
-#
-# socket: 'path/to/dovecot/stats'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 24242
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 24242
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 24242
-
-localsocket:
- name : 'local'
- socket : '/var/run/dovecot/stats'
-
-localsocket_old:
- name : 'local'
- socket : '/var/run/dovecot/old-stats'
-
diff --git a/src/collectors/python.d.plugin/dovecot/integrations/dovecot.md b/src/collectors/python.d.plugin/dovecot/integrations/dovecot.md
deleted file mode 100644
index aaf207e85..000000000
--- a/src/collectors/python.d.plugin/dovecot/integrations/dovecot.md
+++ /dev/null
@@ -1,197 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/dovecot/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/dovecot/metadata.yaml"
-sidebar_label: "Dovecot"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Mail Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Dovecot
-
-
-<img src="https://netdata.cloud/img/dovecot.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: dovecot
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.
-
-It uses the Dovecot stats socket and executes the `EXPORT global` command to get the statistics.
-
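-For illustration, a minimal sketch of that exchange (assuming the default socket path; this mirrors the collector's request format but is not the collector itself):
-
-```python
-import socket
-
-s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-s.connect('/var/run/dovecot/stats')
-s.sendall(b'EXPORT\tglobal\r\n')  # ask Dovecot to export its global stats
-raw = s.recv(65536).decode()
-s.close()
-
-# the reply is two tab-separated rows: field names first, then values
-names, values = raw.split('\n')[:2]
-stats = dict(zip(names.split('\t'), values.split('\t')))
-print(stats.get('num_logins'))
-```
-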
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Dovecot instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| dovecot.sessions | active sessions | number |
-| dovecot.logins | logins | number |
-| dovecot.commands | commands | commands |
-| dovecot.faults | minor, major | faults |
-| dovecot.context_switches | voluntary, involuntary | switches |
-| dovecot.io | read, write | KiB/s |
-| dovecot.net | read, write | kilobits/s |
-| dovecot.syscalls | read, write | syscalls/s |
-| dovecot.lookup | path, attr | number/s |
-| dovecot.cache | hits | hits/s |
-| dovecot.auth | ok, failed | attempts |
-| dovecot.auth_cache | hit, miss | number |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Dovecot configuration
-
-The Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/dovecot.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/dovecot.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| socket | Use this socket to communicate with Dovecot. | /var/run/dovecot/stats | no |
-| host | Instead of using a socket, you can point the collector to an IP for Dovecot statistics. | | no |
-| port | Used in combination with host, configures the port Dovecot listens on. | | no |
-
-</details>
-
-#### Examples
-
-##### Local TCP
-
-A basic TCP configuration.
-
-<details open><summary>Config</summary>
-
-```yaml
-localtcpip:
- name: 'local'
- host: '127.0.0.1'
- port: 24242
-
-```
-</details>
-
-##### Local socket
-
-A basic local socket configuration.
-
-<details open><summary>Config</summary>
-
-```yaml
-localsocket:
- name: 'local'
- socket: '/var/run/dovecot/stats'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `dovecot` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin dovecot debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/dovecot/metadata.yaml b/src/collectors/python.d.plugin/dovecot/metadata.yaml
deleted file mode 100644
index b247da846..000000000
--- a/src/collectors/python.d.plugin/dovecot/metadata.yaml
+++ /dev/null
@@ -1,207 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: dovecot
- monitored_instance:
- name: Dovecot
- link: 'https://www.dovecot.org/'
- categories:
- - data-collection.mail-servers
- icon_filename: 'dovecot.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - dovecot
- - imap
- - mail
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors Dovecot metrics about sessions, logins, commands, page faults and more.'
- method_description: 'It uses the Dovecot stats socket and executes the `EXPORT global` command to get the statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'If no configuration is given, the collector will attempt to connect to Dovecot using the UNIX socket located at `/var/run/dovecot/stats`'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Dovecot configuration'
- description: The Dovecot UNIX socket should have R/W permissions for user netdata, or Dovecot should be configured with a TCP/IP socket.
- configuration:
- file:
- name: python.d/dovecot.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: socket
- description: Use this socket to communicate with Dovecot.
- default_value: /var/run/dovecot/stats
- required: false
- - name: host
- description: Instead of using a socket, you can point the collector to an IP for Dovecot statistics.
- default_value: ''
- required: false
- - name: port
- description: Used in combination with host, configures the port Dovecot listens on.
- default_value: ''
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local TCP
- description: A basic TCP configuration.
- config: |
- localtcpip:
- name: 'local'
- host: '127.0.0.1'
- port: 24242
- - name: Local socket
- description: A basic local socket configuration.
- config: |
- localsocket:
- name: 'local'
- socket: '/var/run/dovecot/stats'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: dovecot.sessions
- description: Dovecot Active Sessions
- unit: "number"
- chart_type: line
- dimensions:
- - name: active sessions
- - name: dovecot.logins
- description: Dovecot Logins
- unit: "number"
- chart_type: line
- dimensions:
- - name: logins
- - name: dovecot.commands
- description: Dovecot Commands
- unit: "commands"
- chart_type: line
- dimensions:
- - name: commands
- - name: dovecot.faults
- description: Dovecot Page Faults
- unit: "faults"
- chart_type: line
- dimensions:
- - name: minor
- - name: major
- - name: dovecot.context_switches
- description: Dovecot Context Switches
- unit: "switches"
- chart_type: line
- dimensions:
- - name: voluntary
- - name: involuntary
- - name: dovecot.io
- description: Dovecot Disk I/O
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: dovecot.net
- description: Dovecot Network Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
- - name: dovecot.syscalls
- description: Dovecot Number of SysCalls
- unit: "syscalls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: dovecot.lookup
- description: Dovecot Lookups
- unit: "number/s"
- chart_type: stacked
- dimensions:
- - name: path
- - name: attr
- - name: dovecot.cache
- description: Dovecot Cache Hits
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hits
- - name: dovecot.auth
- description: Dovecot Authentications
- unit: "attempts"
- chart_type: stacked
- dimensions:
- - name: ok
- - name: failed
- - name: dovecot.auth_cache
- description: Dovecot Authentication Cache
- unit: "number"
- chart_type: stacked
- dimensions:
- - name: hit
- - name: miss
diff --git a/src/collectors/python.d.plugin/example/README.md b/src/collectors/python.d.plugin/example/README.md
deleted file mode 120000
index 55877a99a..000000000
--- a/src/collectors/python.d.plugin/example/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/example_collector.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/example/example.chart.py b/src/collectors/python.d.plugin/example/example.chart.py
deleted file mode 100644
index d6c0b6658..000000000
--- a/src/collectors/python.d.plugin/example/example.chart.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: example netdata python.d module
-# Author: Put your name here (your github login)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from random import SystemRandom
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-priority = 90000
-
-ORDER = [
- 'random',
-]
-
-CHARTS = {
- 'random': {
- 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],
- 'lines': [
- ['random1']
- ]
- }
-}
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.random = SystemRandom()
- self.num_lines = self.configuration.get('num_lines', 4)
- self.lower = self.configuration.get('lower', 0)
- self.upper = self.configuration.get('upper', 100)
-
- @staticmethod
- def check():
- return True
-
- def get_data(self):
- data = dict()
-
- for i in range(0, self.num_lines):
- dimension_id = ''.join(['random', str(i)])
-
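- # register the dimension on the chart the first time it appears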
- if dimension_id not in self.charts['random']:
- self.charts['random'].add_dimension([dimension_id])
-
- data[dimension_id] = self.random.randint(self.lower, self.upper)
-
- return data
diff --git a/src/collectors/python.d.plugin/example/example.conf b/src/collectors/python.d.plugin/example/example.conf
deleted file mode 100644
index 31261b840..000000000
--- a/src/collectors/python.d.plugin/example/example.conf
+++ /dev/null
@@ -1,87 +0,0 @@
-# netdata python.d.plugin configuration for example
-#
-# This file is in YAML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear on the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, example also supports the following:
-#
-# num_lines: 4 # the number of lines to create
-# lower: 0 # the lower bound of numbers to randomly sample from
-# upper: 100 # the upper bound of numbers to randomly sample from
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-four_lines:
- name: "Four Lines" # the JOB's name as it will appear on the dashboard
- update_every: 1 # the JOB's data collection frequency
- priority: 60000 # the JOB's order on the dashboard
- penalty: yes # the JOB's penalty
- autodetection_retry: 0 # the JOB's re-check interval in seconds
- num_lines: 4 # the number of lines to create
- lower: 0 # the lower bound of numbers to randomly sample from
- upper: 100 # the upper bound of numbers to randomly sample from
-
-# if you wanted to make another job to run in addition to the one above then
-# you would just uncomment the job configuration below.
-# two_lines:
-# name: "Two Lines" # the JOB's name as it will appear on the dashboard
-# num_lines: 2 # the number of lines to create
-# lower: 50 # the lower bound of numbers to randomly sample from
-# upper: 75 # the upper bound of numbers to randomly sample from
diff --git a/src/collectors/python.d.plugin/example/integrations/example_collector.md b/src/collectors/python.d.plugin/example/integrations/example_collector.md
deleted file mode 100644
index 03c0165b4..000000000
--- a/src/collectors/python.d.plugin/example/integrations/example_collector.md
+++ /dev/null
@@ -1,171 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/example/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/example/metadata.yaml"
-sidebar_label: "Example collector"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Other"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Example collector
-
-Plugin: python.d.plugin
-Module: example
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Example collector that generates some random numbers as metrics.
-
-If you want to write your own collector, read our [writing a new Python module](/src/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
-
-
-The `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Example collector instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| example.random | random | number |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/example.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/example.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| num_lines | The number of lines to create. | 4 | no |
-| lower | The lower bound of numbers to randomly sample from. | 0 | no |
-| upper | The upper bound of numbers to randomly sample from. | 100 | no |
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-four_lines:
- name: "Four Lines"
- update_every: 1
- priority: 60000
- penalty: yes
- autodetection_retry: 0
- num_lines: 4
- lower: 0
- upper: 100
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `example` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin example debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/example/metadata.yaml b/src/collectors/python.d.plugin/example/metadata.yaml
deleted file mode 100644
index 6b2401366..000000000
--- a/src/collectors/python.d.plugin/example/metadata.yaml
+++ /dev/null
@@ -1,138 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: example
- monitored_instance:
- name: Example collector
- link: /src/collectors/python.d.plugin/example/README.md
- categories:
- - data-collection.other
- icon_filename: ""
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - example
- - netdata
- - python
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- Example collector that generates some random numbers as metrics.
-
- If you want to write your own collector, read our [writing a new Python module](/src/collectors/python.d.plugin/README.md#how-to-write-a-new-module) tutorial.
- method_description: |
- The `get_data()` function uses `random.randint()` to generate a random number which will be collected as a metric.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: python.d/example.conf
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: num_lines
- description: The number of lines to create.
- default_value: 4
- required: false
- - name: lower
- description: The lower bound of numbers to randomly sample from.
- default_value: 0
- required: false
- - name: upper
- description: The upper bound of numbers to randomly sample from.
- default_value: 100
- required: false
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: Config
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration.
- config: |
- four_lines:
- name: "Four Lines"
- update_every: 1
- priority: 60000
- penalty: yes
- autodetection_retry: 0
- num_lines: 4
- lower: 0
- upper: 100
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: example.random
- description: A random number
- unit: number
- chart_type: line
- dimensions:
- - name: random
diff --git a/src/collectors/python.d.plugin/exim/exim.chart.py b/src/collectors/python.d.plugin/exim/exim.chart.py
deleted file mode 100644
index 7238a1bea..000000000
--- a/src/collectors/python.d.plugin/exim/exim.chart.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: exim netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-EXIM_COMMAND = 'exim -bpc'
-
-ORDER = [
- 'qemails',
-]
-
-CHARTS = {
- 'qemails': {
- 'options': [None, 'Exim Queue Emails', 'emails', 'queue', 'exim.qemails', 'line'],
- 'lines': [
- ['emails', None, 'absolute']
- ]
- }
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.command = EXIM_COMMAND
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- try:
- return {'emails': int(self._get_raw_data()[0])}
-        except (TypeError, IndexError, ValueError, AttributeError):
- return None
diff --git a/src/collectors/python.d.plugin/exim/exim.conf b/src/collectors/python.d.plugin/exim/exim.conf
deleted file mode 100644
index 3b7e65922..000000000
--- a/src/collectors/python.d.plugin/exim/exim.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# netdata python.d.plugin configuration for exim
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# exim is slow, so once every 10 seconds
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, exim also supports the following:
-#
-# command: 'exim -bpc' # the command to run
-#
-
-# ----------------------------------------------------------------------
-# REQUIRED exim CONFIGURATION
-#
-# netdata will query exim as user netdata.
-# By default exim will refuse to respond.
-#
-# To allow querying exim as non-admin user, please set the following
-# in your exim configuration:
-#
-# queue_list_requires_admin = false
-#
-# Your exim configuration should be in
-#
-# /etc/exim/exim4.conf
-# or
-# /etc/exim4/conf.d/main/000_local_options
-#
-# Please consult your distribution information to find the exact file.
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-local:
- command: 'exim -bpc'
diff --git a/src/collectors/python.d.plugin/exim/integrations/exim.md b/src/collectors/python.d.plugin/exim/integrations/exim.md
deleted file mode 100644
index a64a5449b..000000000
--- a/src/collectors/python.d.plugin/exim/integrations/exim.md
+++ /dev/null
@@ -1,181 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/exim/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/exim/metadata.yaml"
-sidebar_label: "Exim"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Mail Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Exim
-
-
-<img src="https://netdata.cloud/img/exim.jpg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: exim
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Exim mail queue.
-
-It uses the `exim` command line binary to get the statistics.
-
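-As a rough illustration of what happens under the hood, the sketch below runs the same command outside of the python.d framework and parses its single-line output (assuming `exim` is on the PATH and the current user is allowed to query the queue, see the prerequisites below):
-
-```python
-import subprocess
-
-# `exim -bpc` prints a single integer: the number of queued emails
-out = subprocess.check_output(['exim', '-bpc'])
-print({'emails': int(out.strip())})
-```
-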
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Assuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Exim instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| exim.qemails | emails | emails |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Exim configuration - local installation
-
-The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary, so add the `queue_list_requires_admin` statement to your exim configuration and set it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.
-
-1. Edit the `exim` configuration with your preferred editor and add:
-`queue_list_requires_admin = false`
-2. Restart `exim` and Netdata
-
-
-#### Exim configuration - WHM (CPanel) server
-
-On a WHM server, you can reconfigure `exim` via the WHM interface with the following steps.
-
-1. Login to WHM
-2. Navigate to Service Configuration --> Exim Configuration Manager --> Advanced Editor tab
-3. Scroll down to the **Add additional configuration setting** button and click on it.
-4. In the new dropdown that appears above, find `queue_list_requires_admin` and set it to `false`.
-5. Scroll to the end and click the **Save** button.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/exim.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/exim.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| command | Path and command to the `exim` binary | exim -bpc | no |
-
-</details>
-
-#### Examples
-
-##### Local exim install
-
-A basic local exim install
-
-```yaml
-local:
- command: 'exim -bpc'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `exim` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin exim debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/exim/metadata.yaml b/src/collectors/python.d.plugin/exim/metadata.yaml
deleted file mode 100644
index a8be02d99..000000000
--- a/src/collectors/python.d.plugin/exim/metadata.yaml
+++ /dev/null
@@ -1,132 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: exim
- monitored_instance:
- name: Exim
- link: "https://www.exim.org/"
- categories:
- - data-collection.mail-servers
- icon_filename: "exim.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - exim
- - mail
- - server
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors Exim mail queue."
- method_description: "It uses the `exim` command line binary to get the statistics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "Assuming setup prerequisites are met, the collector will try to gather statistics using the method described above, even without any configuration."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Exim configuration - local installation"
- description: |
-              The module uses the `exim` binary, which can only be executed as root by default. We need to allow other users to run the `exim` binary, so add the `queue_list_requires_admin` statement to your exim configuration and set it to `false` (it is `true` by default). On many Linux distributions, the default location of the `exim` configuration is `/etc/exim.conf`.
-
- 1. Edit the `exim` configuration with your preferred editor and add:
- `queue_list_requires_admin = false`
- 2. Restart `exim` and Netdata
- - title: "Exim configuration - WHM (CPanel) server"
- description: |
-              On a WHM server, you can reconfigure `exim` via the WHM interface with the following steps.
-
-              1. Login to WHM
-              2. Navigate to Service Configuration --> Exim Configuration Manager --> Advanced Editor tab
-              3. Scroll down to the **Add additional configuration setting** button and click on it.
-              4. In the new dropdown that appears above, find `queue_list_requires_admin` and set it to `false`.
-              5. Scroll to the end and click the **Save** button.
- configuration:
- file:
- name: python.d/exim.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
-              description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: command
- description: Path and command to the `exim` binary
- default_value: "exim -bpc"
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Local exim install
- description: A basic local exim install
- config: |
- local:
- command: 'exim -bpc'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: exim.qemails
- description: Exim Queue Emails
- unit: "emails"
- chart_type: line
- dimensions:
- - name: emails
diff --git a/src/collectors/python.d.plugin/gearman/gearman.chart.py b/src/collectors/python.d.plugin/gearman/gearman.chart.py
deleted file mode 100644
index 5e280a4d8..000000000
--- a/src/collectors/python.d.plugin/gearman/gearman.chart.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# Description: gearman netdata python.d module
-# Author: Kyle Agronick (agronick)
-# SPDX-License-Identifier: GPL-3.0+
-
-# Gearman Netdata Plugin
-
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-CHARTS = {
- 'total_workers': {
- 'options': [None, 'Total Jobs', 'Jobs', 'Total Jobs', 'gearman.total_jobs', 'line'],
- 'lines': [
- ['total_pending', 'Pending', 'absolute'],
- ['total_running', 'Running', 'absolute'],
- ]
- },
-}
-
-
-def job_chart_template(job_name):
- return {
- 'options': [None, job_name, 'Jobs', 'Activity by Job', 'gearman.single_job', 'stacked'],
- 'lines': [
- ['{0}_pending'.format(job_name), 'Pending', 'absolute'],
- ['{0}_idle'.format(job_name), 'Idle', 'absolute'],
- ['{0}_running'.format(job_name), 'Running', 'absolute'],
- ]
- }
-
-
-def build_result_dict(job):
- """
- Get the status for each job
- :return: dict
- """
-
- total, running, available = job['metrics']
-
- idle = available - running
- pending = total - running
-
- return {
- '{0}_pending'.format(job['job_name']): pending,
- '{0}_idle'.format(job['job_name']): idle,
- '{0}_running'.format(job['job_name']): running,
- }
-
-
-def parse_worker_data(job):
- job_name = job[0]
- job_metrics = job[1:]
-
- return {
- 'job_name': job_name,
- 'metrics': job_metrics,
- }
-
-
-class GearmanReadException(Exception):
- pass
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.request = "status\n"
- self._keep_alive = True
-
- self.host = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 4730)
-
- self.tls = self.configuration.get('tls', False)
- self.cert = self.configuration.get('cert', None)
- self.key = self.configuration.get('key', None)
-
- self.active_jobs = set()
- self.definitions = deepcopy(CHARTS)
- self.order = ['total_workers']
-
- def _get_data(self):
- """
- Format data received from socket
- :return: dict
- """
-
- try:
- active_jobs = self.get_active_jobs()
- except GearmanReadException:
- return None
-
- found_jobs, job_data = self.process_jobs(active_jobs)
- self.remove_stale_jobs(found_jobs)
- return job_data
-
- def get_active_jobs(self):
- active_jobs = []
-
- for job in self.get_worker_data():
- parsed_job = parse_worker_data(job)
-
- # Gearman does not clean up old jobs
- # We only care about jobs that have
- # some relevant data
- if not any(parsed_job['metrics']):
- continue
-
- active_jobs.append(parsed_job)
-
- return active_jobs
-
- def get_worker_data(self):
- """
- Split the data returned from Gearman
- into a list of lists
-
- This returns the same output that you
- would get from a gearadmin --status
- command.
-
- Example output returned from
- _get_raw_data():
- prefix generic_worker4 78 78 500
- generic_worker2 78 78 500
- generic_worker3 0 0 760
- generic_worker1 0 0 500
-
- :return: list
- """
-
- try:
- raw = self._get_raw_data()
- except (ValueError, AttributeError):
- raise GearmanReadException()
-
- if raw is None:
- self.debug("Gearman returned no data")
- raise GearmanReadException()
-
- workers = list()
-
- for line in raw.splitlines()[:-1]:
- parts = line.split()
- if not parts:
- continue
-
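-            # each status line is "FUNCTION TOTAL RUNNING AVAILABLE_WORKERS";
-            # function names may contain whitespace, so everything except the
-            # last three numeric fields is re-joined to form the job name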
- name = '_'.join(parts[:-3])
- try:
- values = [int(w) for w in parts[-3:]]
- except ValueError:
- continue
-
- w = [name]
- w.extend(values)
- workers.append(w)
-
- return workers
-
- def process_jobs(self, active_jobs):
-
- output = {
- 'total_pending': 0,
- 'total_idle': 0,
- 'total_running': 0,
- }
- found_jobs = set()
-
- for parsed_job in active_jobs:
-
- job_name = self.add_job(parsed_job)
- found_jobs.add(job_name)
- job_data = build_result_dict(parsed_job)
-
- for sum_value in ('pending', 'running', 'idle'):
- output['total_{0}'.format(sum_value)] += job_data['{0}_{1}'.format(job_name, sum_value)]
-
- output.update(job_data)
-
- return found_jobs, output
-
- def remove_stale_jobs(self, active_job_list):
- """
- Removes jobs that have no workers, pending jobs,
- or running jobs
- :param active_job_list: The latest list of active jobs
- :type active_job_list: iterable
- :return: None
- """
-
- for to_remove in self.active_jobs - active_job_list:
- self.remove_job(to_remove)
-
- def add_job(self, parsed_job):
- """
- Adds a job to the list of active jobs
- :param parsed_job: A parsed job dict
- :type parsed_job: dict
-        :return: The job's name (str)
- """
-
- def add_chart(job_name):
- """
- Adds a new job chart
- :param job_name: The name of the job to add
- :type job_name: string
- :return: None
- """
-
- job_key = 'job_{0}'.format(job_name)
- template = job_chart_template(job_name)
- new_chart = self.charts.add_chart([job_key] + template['options'])
- for dimension in template['lines']:
- new_chart.add_dimension(dimension)
-
- if parsed_job['job_name'] not in self.active_jobs:
- add_chart(parsed_job['job_name'])
- self.active_jobs.add(parsed_job['job_name'])
-
- return parsed_job['job_name']
-
- def remove_job(self, job_name):
- """
-        Removes a job from the list of active jobs
- :param job_name: The name of the job to remove
- :type job_name: string
- :return: None
- """
-
- def remove_chart(job_name):
- """
- Removes a job chart
- :param job_name: The name of the job to remove
- :type job_name: string
- :return: None
- """
-
- job_key = 'job_{0}'.format(job_name)
- self.charts[job_key].obsolete()
- del self.charts[job_key]
-
- remove_chart(job_name)
- self.active_jobs.remove(job_name)
diff --git a/src/collectors/python.d.plugin/gearman/gearman.conf b/src/collectors/python.d.plugin/gearman/gearman.conf
deleted file mode 100644
index 635e893ef..000000000
--- a/src/collectors/python.d.plugin/gearman/gearman.conf
+++ /dev/null
@@ -1,75 +0,0 @@
-# netdata python.d.plugin configuration for gearman
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, gearman also supports the following:
-#
-# host: localhost # The host running the Gearman server
-# port: 4730 # Port of the Gearman server
-# tls: no # Whether to use TLS or not
-# cert: /path/to/cert # Path to cert if using TLS
-# key: /path/to/key # Path to key if using TLS
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOB
-
-localhost:
- name : 'local'
- host : 'localhost'
-    port  : 4730
\ No newline at end of file
diff --git a/src/collectors/python.d.plugin/gearman/integrations/gearman.md b/src/collectors/python.d.plugin/gearman/integrations/gearman.md
deleted file mode 100644
index 717b0dcad..000000000
--- a/src/collectors/python.d.plugin/gearman/integrations/gearman.md
+++ /dev/null
@@ -1,210 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/gearman/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/gearman/metadata.yaml"
-sidebar_label: "Gearman"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Distributed Computing Systems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Gearman
-
-
-<img src="https://netdata.cloud/img/gearman.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: gearman
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Monitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management.
-
-This collector connects to a Gearman instance via either TCP or unix socket.
-
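-To see exactly what the collector reads, you can speak the same plain-text admin protocol by hand. A minimal sketch (assuming a gearmand on localhost:4730; each reply line is `FUNCTION TOTAL RUNNING AVAILABLE_WORKERS` and the reply ends with a line containing a single dot):
-
-```python
-import socket
-
-sock = socket.create_connection(('localhost', 4730))
-sock.sendall(b'status\n')
-
-buf = b''
-while not buf.endswith(b'.\n'):  # gearmand terminates the reply with a lone dot
-    chunk = sock.recv(4096)
-    if not chunk:  # connection closed early
-        break
-    buf += chunk
-sock.close()
-
-for line in buf.decode().splitlines()[:-1]:
-    name, total, running, available = line.rsplit(None, 3)
-    print(name, 'pending:', int(total) - int(running), 'running:', int(running))
-```
-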
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-When no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Gearman instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| gearman.total_jobs | Pending, Running | Jobs |
-
-### Per gearman job
-
-Metrics related to Gearman jobs. Each job produces its own set of the following metrics.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| gearman.single_job | Pending, Idle, Running | Jobs |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ gearman_workers_queued ](https://github.com/netdata/netdata/blob/master/src/health/health.d/gearman.conf) | gearman.single_job | average number of queued jobs over the last 10 minutes |
-
-
-## Setup
-
-### Prerequisites
-
-#### Socket permissions
-
-The gearman UNIX socket should have read permission for user netdata.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/gearman.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/gearman.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | URL or IP where gearman is running. | localhost | no |
-| port | Port of URL or IP where gearman is running. | 4730 | no |
-| tls | Use tls to connect to gearman. | false | no |
-| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |
-| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |
-
-</details>
-
-#### Examples
-
-##### Local gearman service
-
-A basic host and port gearman configuration for localhost.
-
-```yaml
-localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
-
-remote:
- name: 'remote'
- host: '192.0.2.1'
- port: 4730
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `gearman` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin gearman debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/gearman/metadata.yaml b/src/collectors/python.d.plugin/gearman/metadata.yaml
deleted file mode 100644
index 4ab9c12ef..000000000
--- a/src/collectors/python.d.plugin/gearman/metadata.yaml
+++ /dev/null
@@ -1,168 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: gearman
- monitored_instance:
- name: Gearman
- link: "http://gearman.org/"
- categories:
- - data-collection.distributed-computing-systems
- icon_filename: "gearman.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - gearman
- - gearman job server
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Gearman metrics for proficient system task distribution. Track job counts, worker statuses, and queue lengths for effective distributed task management."
- method_description: "This collector connects to a Gearman instance via either TCP or unix socket."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "When no configuration file is found, the collector tries to connect to TCP/IP socket: localhost:4730."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "Socket permissions"
- description: The gearman UNIX socket should have read permission for user netdata.
- configuration:
- file:
- name: python.d/gearman.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
-              description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: host
- description: URL or IP where gearman is running.
- default_value: "localhost"
- required: false
- - name: port
- description: Port of URL or IP where gearman is running.
- default_value: "4730"
- required: false
- - name: tls
- description: Use tls to connect to gearman.
- default_value: "false"
- required: false
- - name: cert
- description: Provide a certificate file if needed to connect to a TLS gearman instance.
- default_value: ""
- required: false
- - name: key
- description: Provide a key file if needed to connect to a TLS gearman instance.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local gearman service
- description: A basic host and port gearman configuration for localhost.
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- host: 'localhost'
- port: 4730
-
- remote:
- name: 'remote'
- host: '192.0.2.1'
- port: 4730
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: gearman_workers_queued
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/gearman.conf
- metric: gearman.single_job
- info: average number of queued jobs over the last 10 minutes
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: gearman.total_jobs
- description: Total Jobs
- unit: "Jobs"
- chart_type: line
- dimensions:
- - name: Pending
- - name: Running
- - name: gearman job
- description: "Metrics related to Gearman jobs. Each job produces its own set of the following metrics."
- labels: []
- metrics:
- - name: gearman.single_job
- description: "{job_name}"
- unit: "Jobs"
- chart_type: stacked
- dimensions:
- - name: Pending
- - name: Idle
-                - name: Running
diff --git a/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md b/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
index cbe7f265f..8f086765e 100644
--- a/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
+++ b/src/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
@@ -310,6 +310,7 @@ app1:
### Debug Mode
+
To troubleshoot issues with the `go_expvar` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -332,4 +333,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin go_expvar debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `go_expvar` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep go_expvar
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep go_expvar /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep go_expvar
+```
+
diff --git a/src/collectors/python.d.plugin/haproxy/metadata.yaml b/src/collectors/python.d.plugin/haproxy/metadata.yaml
index f389b066e..e748a294c 100644
--- a/src/collectors/python.d.plugin/haproxy/metadata.yaml
+++ b/src/collectors/python.d.plugin/haproxy/metadata.yaml
@@ -1,5 +1,5 @@
# This collector will not appear in documentation, as the go version is preferred,
-# /src/go/collectors/go.d.plugin/modules/haproxy/README.md
+# /src/go/plugin/go.d/modules/haproxy/README.md
#
#
# meta:
diff --git a/src/collectors/python.d.plugin/icecast/icecast.chart.py b/src/collectors/python.d.plugin/icecast/icecast.chart.py
deleted file mode 100644
index a967d1779..000000000
--- a/src/collectors/python.d.plugin/icecast/icecast.chart.py
+++ /dev/null
@@ -1,94 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: icecast netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'listeners',
-]
-
-CHARTS = {
- 'listeners': {
- 'options': [None, 'Number Of Listeners', 'listeners', 'listeners', 'icecast.listeners', 'line'],
- 'lines': [
- ]
- }
-}
-
-
-class Source:
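-    # a source is treated as active only when icecast reports both a stream
-    # start time and a server name for it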
- def __init__(self, idx, data):
- self.name = 'source_{0}'.format(idx)
- self.is_active = data.get('stream_start') and data.get('server_name')
- self.listeners = data['listeners']
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url')
- self._manager = self._build_manager()
-
- def check(self):
- """
- Add active sources to the "listeners" chart
- :return: bool
- """
- sources = self.get_sources()
- if not sources:
- return None
-
- active_sources = 0
- for idx, raw_source in enumerate(sources):
- if Source(idx, raw_source).is_active:
- active_sources += 1
- dim_id = 'source_{0}'.format(idx)
- dim = 'source {0}'.format(idx)
- self.definitions['listeners']['lines'].append([dim_id, dim])
-
- return bool(active_sources)
-
- def _get_data(self):
- """
- Get number of listeners for every source
- :return: dict
- """
- sources = self.get_sources()
- if not sources:
- return None
-
- data = dict()
-
- for idx, raw_source in enumerate(sources):
- source = Source(idx, raw_source)
- data[source.name] = source.listeners
-
- return data
-
- def get_sources(self):
- """
- Format data received from http request and return list of sources
- :return: list
- """
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
-
- try:
- data = json.loads(raw_data)
- except ValueError as error:
- self.error('JSON decode error:', error)
- return None
-
- sources = data['icestats'].get('source')
- if not sources:
- return None
-
- return sources if isinstance(sources, list) else [sources]
diff --git a/src/collectors/python.d.plugin/icecast/icecast.conf b/src/collectors/python.d.plugin/icecast/icecast.conf
deleted file mode 100644
index a33074aef..000000000
--- a/src/collectors/python.d.plugin/icecast/icecast.conf
+++ /dev/null
@@ -1,81 +0,0 @@
-# netdata python.d.plugin configuration for icecast
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed to run at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, icecast also supports the following:
-#
-# url: 'URL' # the URL to fetch icecast's stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost:8443/status-json.xsl'
-
-localipv4:
- name : 'local'
-    url   : 'http://127.0.0.1:8443/status-json.xsl'
\ No newline at end of file
diff --git a/src/collectors/python.d.plugin/icecast/integrations/icecast.md b/src/collectors/python.d.plugin/icecast/integrations/icecast.md
deleted file mode 100644
index 17316b063..000000000
--- a/src/collectors/python.d.plugin/icecast/integrations/icecast.md
+++ /dev/null
@@ -1,166 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/icecast/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/icecast/metadata.yaml"
-sidebar_label: "Icecast"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Media Services"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Icecast
-
-
-<img src="https://netdata.cloud/img/icecast.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: icecast
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Icecast listener counts.
-
-It connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.
-
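-The same endpoint can be inspected by hand. A minimal sketch (assuming a local icecast on port 8443; note that `source` is a single object when only one stream is mounted and a list when there are several, which is why the collector normalizes it):
-
-```python
-import json
-from urllib.request import urlopen
-
-with urlopen('http://localhost:8443/status-json.xsl') as resp:
-    stats = json.load(resp)
-
-sources = stats['icestats'].get('source') or []
-if not isinstance(sources, list):  # a single mount is returned as a dict
-    sources = [sources]
-
-for idx, src in enumerate(sources):
-    print('source_{0}'.format(idx), src.get('listeners', 0))
-```
-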
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-Without configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Icecast instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| icecast.listeners | a dimension for each active source | listeners |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Icecast minimum version
-
-Requires Icecast version 2.4.0 or later.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/icecast.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/icecast.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |
-| user | Username to use to connect to `url` if it's password protected. | | no |
-| pass | Password to use to connect to `url` if it's password protected. | | no |
-
-</details>
-
-#### Examples
-
-##### Remote Icecast server
-
-Configure a remote icecast server
-
-```yaml
-remote:
- url: 'http://1.2.3.4:8443/status-json.xsl'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `icecast` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin icecast debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/icecast/metadata.yaml b/src/collectors/python.d.plugin/icecast/metadata.yaml
deleted file mode 100644
index 4bcf5e39f..000000000
--- a/src/collectors/python.d.plugin/icecast/metadata.yaml
+++ /dev/null
@@ -1,127 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: icecast
- monitored_instance:
- name: Icecast
- link: 'https://icecast.org/'
- categories:
- - data-collection.media-streaming-servers
- icon_filename: 'icecast.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - icecast
- - streaming
- - media
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors Icecast listener counts.'
- method_description: 'It connects to an icecast URL and uses the `status-json.xsl` endpoint to retrieve statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'Without configuration, the collector attempts to connect to http://localhost:8443/status-json.xsl'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Icecast minimum version'
-            description: 'Requires Icecast version 2.4.0 or later.'
- configuration:
- file:
- name: python.d/icecast.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
-              description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed to run at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: url
- description: The URL (and port) to the icecast server. Needs to also include `/status-json.xsl`
- default_value: 'http://localhost:8443/status-json.xsl'
- required: false
- - name: user
- description: Username to use to connect to `url` if it's password protected.
- default_value: ''
- required: false
- - name: pass
- description: Password to use to connect to `url` if it's password protected.
- default_value: ''
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Remote Icecast server
- description: Configure a remote icecast server
- folding:
- enabled: false
- config: |
- remote:
- url: 'http://1.2.3.4:8443/status-json.xsl'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: icecast.listeners
- description: Number Of Listeners
- unit: "listeners"
- chart_type: line
- dimensions:
- - name: a dimension for each active source
diff --git a/src/collectors/python.d.plugin/ipfs/integrations/ipfs.md b/src/collectors/python.d.plugin/ipfs/integrations/ipfs.md
deleted file mode 100644
index 71e8e28a5..000000000
--- a/src/collectors/python.d.plugin/ipfs/integrations/ipfs.md
+++ /dev/null
@@ -1,203 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/ipfs/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/ipfs/metadata.yaml"
-sidebar_label: "IPFS"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Storage, Mount Points and Filesystems"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# IPFS
-
-
-<img src="https://netdata.cloud/img/ipfs.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: ipfs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors IPFS server metrics about its quality and performance.
-
-It connects to an HTTP endpoint of the IPFS server to collect the metrics.
-
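-The two calls the collector always makes can be reproduced by hand. A minimal sketch (assuming a local IPFS API on port 5001; the IPFS HTTP API only accepts POST requests, which is why the collector uses POST as well):
-
-```python
-import json
-from urllib.request import Request, urlopen
-
-BASE = 'http://localhost:5001'
-
-def api(path):
-    # supplying a (possibly empty) body makes urllib issue a POST request
-    return json.load(urlopen(Request(BASE + path, data=b'')))
-
-bw = api('/api/v0/stats/bw')
-peers = api('/api/v0/swarm/peers')
-print({'in': int(bw['RateIn']), 'out': int(bw['RateOut']),
-       'peers': len(peers['Peers'])})
-```
-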
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If the endpoint is accessible by the Agent, netdata will autodetect it.
-
-#### Limits
-
-Calls to the following endpoints are disabled due to IPFS bugs:
-
-- `/api/v0/stats/repo` (https://github.com/ipfs/go-ipfs/issues/3874)
-- `/api/v0/pin/ls` (https://github.com/ipfs/go-ipfs/issues/7528)
-
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per IPFS instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| ipfs.bandwidth | in, out | kilobits/s |
-| ipfs.peers | peers | peers |
-| ipfs.repo_size | avail, size | GiB |
-| ipfs.repo_objects | objects, pinned, recursive_pins | objects |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ ipfs_datastore_usage ](https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf) | ipfs.repo_size | IPFS datastore utilization |
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/ipfs.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/ipfs.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
-| url | URL to the IPFS API | no | yes |
-| repoapi | Collect repo metrics. | no | no |
-| pinapi | Set status of IPFS pinned object polling. | no | no |
-
-</details>
-
-#### Examples
-
-##### Basic (default out-of-the-box)
-
-A basic example configuration; only one job will run at a time. The autodetection mechanism uses it by default.
-
-```yaml
-localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
-
-remote_host:
- name: 'remote'
- url: 'http://192.0.2.1:5001'
- repoapi: no
- pinapi: no
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `ipfs` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin ipfs debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/ipfs/ipfs.chart.py b/src/collectors/python.d.plugin/ipfs/ipfs.chart.py
deleted file mode 100644
index abfc9c492..000000000
--- a/src/collectors/python.d.plugin/ipfs/ipfs.chart.py
+++ /dev/null
@@ -1,149 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: IPFS netdata python.d module
-# Authors: davidak
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'bandwidth',
- 'peers',
- 'repo_size',
- 'repo_objects',
-]
-
-CHARTS = {
- 'bandwidth': {
- 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
- 'lines': [
- ['in', None, 'absolute', 8, 1000],
- ['out', None, 'absolute', -8, 1000]
- ]
- },
- 'peers': {
- 'options': [None, 'IPFS Peers', 'peers', 'Peers', 'ipfs.peers', 'line'],
- 'lines': [
- ['peers', None, 'absolute']
- ]
- },
- 'repo_size': {
- 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
- 'lines': [
- ['avail', None, 'absolute', 1, 1 << 30],
- ['size', None, 'absolute', 1, 1 << 30],
- ]
- },
- 'repo_objects': {
- 'options': [None, 'IPFS Repo Objects', 'objects', 'Objects', 'ipfs.repo_objects', 'line'],
- 'lines': [
- ['objects', None, 'absolute', 1, 1],
- ['pinned', None, 'absolute', 1, 1],
- ['recursive_pins', None, 'absolute', 1, 1]
- ]
- }
-}
-
-SI_zeroes = {
- 'k': 3,
- 'm': 6,
- 'g': 9,
- 't': 12,
- 'p': 15,
- 'e': 18,
- 'z': 21,
- 'y': 24
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.baseurl = self.configuration.get('url', 'http://localhost:5001')
- self.method = "POST"
- self.do_pinapi = self.configuration.get('pinapi')
- self.do_repoapi = self.configuration.get('repoapi')
- self.__storage_max = None
-
- def _get_json(self, sub_url):
- """
- :return: json decoding of the specified url
- """
- self.url = self.baseurl + sub_url
- try:
- return json.loads(self._get_raw_data())
- except (TypeError, ValueError):
- return dict()
-
- @staticmethod
- def _recursive_pins(keys):
-        # json gives str values under Python 3, so compare against 'recursive', not b'recursive'
-        return sum(1 for k in keys if keys[k]['Type'] == 'recursive')
-
- @staticmethod
- def _dehumanize(store_max):
-        # convert a human-readable size such as '10GB' into bytes (10000000000)
- if not isinstance(store_max, int):
- store_max = store_max.lower()
- if store_max.endswith('b'):
- val, units = store_max[:-2], store_max[-2]
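-                # e.g. '10gb' -> val='10', units='g'; SI_zeroes['g'] appends nine zeros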
- if units in SI_zeroes:
- val += '0' * SI_zeroes[units]
- store_max = val
- try:
- store_max = int(store_max)
- except (TypeError, ValueError):
- store_max = None
- return store_max
-
- def _storagemax(self, store_cfg):
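-        # parse StorageMax once and cache it; later polls reuse the cached value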
- if self.__storage_max is None:
- self.__storage_max = self._dehumanize(store_cfg)
- return self.__storage_max
-
- def _get_data(self):
- """
- Get data from API
- :return: dict
- """
- # suburl : List of (result-key, original-key, transform-func)
- cfg = {
- '/api/v0/stats/bw':
- [
- ('in', 'RateIn', int),
- ('out', 'RateOut', int),
- ],
- '/api/v0/swarm/peers':
- [
- ('peers', 'Peers', len),
- ],
- }
- if self.do_repoapi:
- cfg.update({
- '/api/v0/stats/repo':
- [
- ('size', 'RepoSize', int),
- ('objects', 'NumObjects', int),
- ('avail', 'StorageMax', self._storagemax),
- ],
- })
-
- if self.do_pinapi:
- cfg.update({
- '/api/v0/pin/ls':
- [
- ('pinned', 'Keys', len),
- ('recursive_pins', 'Keys', self._recursive_pins),
- ]
- })
- r = dict()
- for suburl in cfg:
- in_json = self._get_json(suburl)
- for new_key, orig_key, xmute in cfg[suburl]:
- try:
- r[new_key] = xmute(in_json[orig_key])
- except Exception as error:
- self.debug(error)
- return r or None
diff --git a/src/collectors/python.d.plugin/ipfs/ipfs.conf b/src/collectors/python.d.plugin/ipfs/ipfs.conf
deleted file mode 100644
index 8b167b399..000000000
--- a/src/collectors/python.d.plugin/ipfs/ipfs.conf
+++ /dev/null
@@ -1,82 +0,0 @@
-# netdata python.d.plugin configuration for ipfs
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, ipfs also supports the following:
-#
-# url: 'URL' # URL to the IPFS API
-# repoapi: no # Collect repo metrics
-# # Currently defaults to disabled due to IPFS Bug
-# # https://github.com/ipfs/go-ipfs/issues/7528
-# # resulting in very high CPU Usage
-# pinapi: no # Set status of IPFS pinned object polling
-# # Currently defaults to disabled due to IPFS Bug
-# # https://github.com/ipfs/go-ipfs/issues/3874
-# # resulting in very high CPU Usage
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
diff --git a/src/collectors/python.d.plugin/ipfs/metadata.yaml b/src/collectors/python.d.plugin/ipfs/metadata.yaml
deleted file mode 100644
index 55c39e31e..000000000
--- a/src/collectors/python.d.plugin/ipfs/metadata.yaml
+++ /dev/null
@@ -1,172 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: ipfs
- monitored_instance:
- name: IPFS
- link: "https://ipfs.tech/"
- categories:
- - data-collection.storage-mount-points-and-filesystems
- icon_filename: "ipfs.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors IPFS server metrics about its quality and performance."
-      method_description: "It connects to an HTTP endpoint of the IPFS server to collect the metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "If the endpoint is accessible by the Agent, netdata will autodetect it"
- limits:
- description: |
- Calls to the following endpoints are disabled due to IPFS bugs:
-
-            /api/v0/stats/repo (https://github.com/ipfs/go-ipfs/issues/7528)
-            /api/v0/pin/ls (https://github.com/ipfs/go-ipfs/issues/3874)
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "python.d/ipfs.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
-          The following options can be defined globally: priority, penalty, autodetection_retry, update_every. They can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: ""
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: The JOB's name as it will appear at the dashboard (by default is the job_name)
- default_value: job_name
- required: false
- - name: url
- description: URL to the IPFS API
- default_value: no
- required: true
- - name: repoapi
- description: Collect repo metrics.
- default_value: no
- required: false
- - name: pinapi
- description: Set status of IPFS pinned object polling.
- default_value: no
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic (default out-of-the-box)
-          description: A basic example configuration; only one job will run at a time. The auto-detection mechanism uses it by default.
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- url: 'http://localhost:5001'
- repoapi: no
- pinapi: no
-
- remote_host:
- name: 'remote'
- url: 'http://192.0.2.1:5001'
- repoapi: no
- pinapi: no
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: ipfs_datastore_usage
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/ipfs.conf
- metric: ipfs.repo_size
- info: IPFS datastore utilization
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: ipfs.bandwidth
- description: IPFS Bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: in
- - name: out
- - name: ipfs.peers
- description: IPFS Peers
- unit: "peers"
- chart_type: line
- dimensions:
- - name: peers
- - name: ipfs.repo_size
- description: IPFS Repo Size
- unit: "GiB"
- chart_type: area
- dimensions:
- - name: avail
- - name: size
- - name: ipfs.repo_objects
- description: IPFS Repo Objects
- unit: "objects"
- chart_type: line
- dimensions:
- - name: objects
- - name: pinned
- - name: recursive_pins
diff --git a/src/collectors/python.d.plugin/memcached/memcached.chart.py b/src/collectors/python.d.plugin/memcached/memcached.chart.py
deleted file mode 100644
index adb9560b7..000000000
--- a/src/collectors/python.d.plugin/memcached/memcached.chart.py
+++ /dev/null
@@ -1,197 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: memcached netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.SocketService import SocketService
-
-ORDER = [
- 'cache',
- 'net',
- 'connections',
- 'items',
- 'evicted_reclaimed',
- 'get',
- 'get_rate',
- 'set_rate',
- 'cas',
- 'delete',
- 'increment',
- 'decrement',
- 'touch',
- 'touch_rate',
-]
-
-CHARTS = {
- 'cache': {
- 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
- 'lines': [
- ['avail', 'available', 'absolute', 1, 1 << 20],
- ['used', 'used', 'absolute', 1, 1 << 20]
- ]
- },
- 'net': {
- 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
- 'lines': [
- ['bytes_read', 'in', 'incremental', 8, 1000],
- ['bytes_written', 'out', 'incremental', -8, 1000],
- ]
- },
- 'connections': {
- 'options': [None, 'Connections', 'connections/s', 'connections', 'memcached.connections', 'line'],
- 'lines': [
- ['curr_connections', 'current', 'incremental'],
- ['rejected_connections', 'rejected', 'incremental'],
- ['total_connections', 'total', 'incremental']
- ]
- },
- 'items': {
- 'options': [None, 'Items', 'items', 'items', 'memcached.items', 'line'],
- 'lines': [
- ['curr_items', 'current', 'absolute'],
- ['total_items', 'total', 'absolute']
- ]
- },
- 'evicted_reclaimed': {
- 'options': [None, 'Evicted and Reclaimed Items', 'items', 'items', 'memcached.evicted_reclaimed', 'line'],
- 'lines': [
- ['reclaimed', 'reclaimed', 'absolute'],
- ['evictions', 'evicted', 'absolute']
- ]
- },
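-    # 'percent-of-absolute-row' renders each dimension as a percentage of the row's total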
- 'get': {
- 'options': [None, 'Get Requests', 'requests', 'get ops', 'memcached.get', 'stacked'],
- 'lines': [
- ['get_hits', 'hits', 'percent-of-absolute-row'],
- ['get_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'get_rate': {
- 'options': [None, 'Get Request Rate', 'requests/s', 'get ops', 'memcached.get_rate', 'line'],
- 'lines': [
- ['cmd_get', 'rate', 'incremental']
- ]
- },
- 'set_rate': {
- 'options': [None, 'Set Request Rate', 'requests/s', 'set ops', 'memcached.set_rate', 'line'],
- 'lines': [
- ['cmd_set', 'rate', 'incremental']
- ]
- },
- 'delete': {
- 'options': [None, 'Delete Requests', 'requests', 'delete ops', 'memcached.delete', 'stacked'],
- 'lines': [
- ['delete_hits', 'hits', 'percent-of-absolute-row'],
- ['delete_misses', 'misses', 'percent-of-absolute-row'],
- ]
- },
- 'cas': {
- 'options': [None, 'Check and Set Requests', 'requests', 'check and set ops', 'memcached.cas', 'stacked'],
- 'lines': [
- ['cas_hits', 'hits', 'percent-of-absolute-row'],
- ['cas_misses', 'misses', 'percent-of-absolute-row'],
- ['cas_badval', 'bad value', 'percent-of-absolute-row']
- ]
- },
- 'increment': {
- 'options': [None, 'Increment Requests', 'requests', 'increment ops', 'memcached.increment', 'stacked'],
- 'lines': [
- ['incr_hits', 'hits', 'percent-of-absolute-row'],
- ['incr_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'decrement': {
- 'options': [None, 'Decrement Requests', 'requests', 'decrement ops', 'memcached.decrement', 'stacked'],
- 'lines': [
- ['decr_hits', 'hits', 'percent-of-absolute-row'],
- ['decr_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'touch': {
- 'options': [None, 'Touch Requests', 'requests', 'touch ops', 'memcached.touch', 'stacked'],
- 'lines': [
- ['touch_hits', 'hits', 'percent-of-absolute-row'],
- ['touch_misses', 'misses', 'percent-of-absolute-row']
- ]
- },
- 'touch_rate': {
- 'options': [None, 'Touch Request Rate', 'requests/s', 'touch ops', 'memcached.touch_rate', 'line'],
- 'lines': [
- ['cmd_touch', 'rate', 'incremental']
- ]
- }
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.request = 'stats\r\n'
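-        # the memcached text protocol 'stats' command; the reply is STAT lines ending in 'END\r\n'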
- self.host = 'localhost'
- self.port = 11211
- self._keep_alive = True
- self.unix_socket = None
-
- def _get_data(self):
- """
- Get data from socket
- :return: dict
- """
- response = self._get_raw_data()
- if response is None:
- # error has already been logged
- return None
-
- if response.startswith('ERROR'):
- self.error('received ERROR')
- return None
-
- try:
- parsed = response.split('\n')
- except AttributeError:
- self.error('response is invalid/empty')
- return None
-
- # split the response
- data = {}
- for line in parsed:
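-            # stats lines look like 'STAT <name> <value>', e.g. 'STAT curr_connections 10'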
- if line.startswith('STAT'):
- try:
- t = line[5:].split(' ')
- data[t[0]] = t[1]
- except (IndexError, ValueError):
- self.debug('invalid line received: ' + str(line))
-
- if not data:
- self.error("received data doesn't have any records")
- return None
-
- # custom calculations
- try:
- data['avail'] = int(data['limit_maxbytes']) - int(data['bytes'])
- data['used'] = int(data['bytes'])
- except (KeyError, ValueError, TypeError):
- pass
-
- return data
-
- def _check_raw_data(self, data):
- if data.endswith('END\r\n'):
- self.debug('received full response from memcached')
- return True
-
- self.debug('waiting more data from memcached')
- return False
-
- def check(self):
- """
- Parse configuration, check if memcached is available
- :return: boolean
- """
- self._parse_config()
- data = self._get_data()
- if data is None:
- return False
- return True
diff --git a/src/collectors/python.d.plugin/memcached/memcached.conf b/src/collectors/python.d.plugin/memcached/memcached.conf
deleted file mode 100644
index 3286b4623..000000000
--- a/src/collectors/python.d.plugin/memcached/memcached.conf
+++ /dev/null
@@ -1,90 +0,0 @@
-# netdata python.d.plugin configuration for memcached
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, memcached also supports the following:
-#
-# socket: 'path/to/memcached.sock'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 11211
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 11211
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 11211
-
diff --git a/src/collectors/python.d.plugin/monit/integrations/monit.md b/src/collectors/python.d.plugin/monit/integrations/monit.md
deleted file mode 100644
index d14d2a963..000000000
--- a/src/collectors/python.d.plugin/monit/integrations/monit.md
+++ /dev/null
@@ -1,214 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/monit/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/monit/metadata.yaml"
-sidebar_label: "Monit"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Synthetic Checks"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Monit
-
-
-<img src="https://netdata.cloud/img/monit.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: monit
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.
-
-
-It gathers data from Monit's XML interface.
-
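-A minimal sketch of the request involved (assuming an unauthenticated local
-Monit on the default port):
-
-```python
-# Fetch and parse Monit's XML status page.
-import urllib.request
-import xml.etree.ElementTree as ET
-
-url = 'http://localhost:2812/_status?format=xml&level=full'
-with urllib.request.urlopen(url) as resp:
-    root = ET.fromstring(resp.read())
-
-# Each <service> element carries a name, a status and a monitor flag.
-for svc in root.findall('./service'):
-    print(svc.find('name').text, svc.find('status').text, svc.find('monitor').text)
-```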
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will attempt to connect to Monit at `http://localhost:2812`.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Monit instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| monit.filesystems | a dimension per target | filesystems |
-| monit.directories | a dimension per target | directories |
-| monit.files | a dimension per target | files |
-| monit.fifos | a dimension per target | pipes |
-| monit.programs | a dimension per target | programs |
-| monit.services | a dimension per target | processes |
-| monit.process_uptime | a dimension per target | seconds |
-| monit.process_threads | a dimension per target | threads |
-| monit.process_childrens | a dimension per target | children |
-| monit.hosts | a dimension per target | hosts |
-| monit.host_latency | a dimension per target | milliseconds |
-| monit.networks | a dimension per target | interfaces |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/monit.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/monit.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every. They can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
-| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |
-| user | Username in case the URL is password protected. | | no |
-| pass | Password in case the URL is password protected. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:2812'
-
-```
-##### Basic Authentication
-
-Example using basic username and password in order to authenticate.
-
-<details open><summary>Config</summary>
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:2812'
- user: 'foo'
- pass: 'bar'
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local'
- url: 'http://localhost:2812'
-
-remote_job:
- name: 'remote'
- url: 'http://192.0.2.1:2812'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `monit` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin monit debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/monit/metadata.yaml b/src/collectors/python.d.plugin/monit/metadata.yaml
deleted file mode 100644
index b51273188..000000000
--- a/src/collectors/python.d.plugin/monit/metadata.yaml
+++ /dev/null
@@ -1,217 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: monit
- monitored_instance:
- name: Monit
- link: https://mmonit.com/monit/
- categories:
- - data-collection.synthetic-checks
- icon_filename: "monit.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - monit
- - mmonit
- - supervision tool
- - monitrc
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Monit targets such as filesystems, directories, files, FIFO pipes and more.
- method_description: |
- It gathers data from Monit's XML interface.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
-          description: By default, this collector will attempt to connect to Monit at `http://localhost:2812`.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "python.d/monit.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
-          The following options can be defined globally: priority, penalty, autodetection_retry, update_every. They can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: "local"
- required: false
- - name: url
- description: The URL to fetch Monit's metrics.
- default_value: http://localhost:2812
- required: true
- - name: user
- description: Username in case the URL is password protected.
- default_value: ""
- required: false
- - name: pass
- description: Password in case the URL is password protected.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
- localhost:
- name : 'local'
- url : 'http://localhost:2812'
- - name: Basic Authentication
- description: Example using basic username and password in order to authenticate.
- config: |
- localhost:
- name : 'local'
- url : 'http://localhost:2812'
- user: 'foo'
- pass: 'bar'
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- localhost:
- name: 'local'
- url: 'http://localhost:2812'
-
- remote_job:
- name: 'remote'
- url: 'http://192.0.2.1:2812'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: monit.filesystems
- description: Filesystems
- unit: "filesystems"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.directories
- description: Directories
- unit: "directories"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.files
- description: Files
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.fifos
- description: Pipes (fifo)
- unit: "pipes"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.programs
- description: Programs statuses
- unit: "programs"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.services
- description: Processes statuses
- unit: "processes"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.process_uptime
- description: Processes uptime
- unit: "seconds"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.process_threads
- description: Processes threads
- unit: "threads"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.process_childrens
- description: Child processes
- unit: "children"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.hosts
- description: Hosts
- unit: "hosts"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.host_latency
- description: Hosts latency
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per target
- - name: monit.networks
- description: Network interfaces and addresses
- unit: "interfaces"
- chart_type: line
- dimensions:
- - name: a dimension per target
diff --git a/src/collectors/python.d.plugin/monit/monit.chart.py b/src/collectors/python.d.plugin/monit/monit.chart.py
deleted file mode 100644
index 5d926961b..000000000
--- a/src/collectors/python.d.plugin/monit/monit.chart.py
+++ /dev/null
@@ -1,360 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: monit netdata python.d module
-# Author: Evgeniy K. (n0guest)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import xml.etree.ElementTree as ET
-from collections import namedtuple
-
-from bases.FrameworkServices.UrlService import UrlService
-
-MonitType = namedtuple('MonitType', ('index', 'name'))
-
-# see enum Service_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
-# typedef enum {
-# Service_Filesystem = 0,
-# Service_Directory,
-# Service_File,
-# Service_Process,
-# Service_Host,
-# Service_System,
-# Service_Fifo,
-# Service_Program,
-# Service_Net,
-# Service_Last = Service_Net
-# } __attribute__((__packed__)) Service_Type;
-
-TYPE_FILESYSTEM = MonitType(0, 'filesystem')
-TYPE_DIRECTORY = MonitType(1, 'directory')
-TYPE_FILE = MonitType(2, 'file')
-TYPE_PROCESS = MonitType(3, 'process')
-TYPE_HOST = MonitType(4, 'host')
-TYPE_SYSTEM = MonitType(5, 'system')
-TYPE_FIFO = MonitType(6, 'fifo')
-TYPE_PROGRAM = MonitType(7, 'program')
-TYPE_NET = MonitType(8, 'net')
-
-TYPES = (
- TYPE_FILESYSTEM,
- TYPE_DIRECTORY,
- TYPE_FILE,
- TYPE_PROCESS,
- TYPE_HOST,
- TYPE_SYSTEM,
- TYPE_FIFO,
- TYPE_PROGRAM,
- TYPE_NET,
-)
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- 'filesystem',
- 'directory',
- 'file',
- 'process',
- 'process_uptime',
- 'process_threads',
- 'process_children',
- 'host',
- 'host_latency',
- 'system',
- 'fifo',
- 'program',
- 'net'
-]
-
-CHARTS = {
- 'filesystem': {
- 'options': ['filesystems', 'Filesystems', 'filesystems', 'filesystem', 'monit.filesystems', 'line'],
- 'lines': []
- },
- 'directory': {
- 'options': ['directories', 'Directories', 'directories', 'filesystem', 'monit.directories', 'line'],
- 'lines': []
- },
- 'file': {
- 'options': ['files', 'Files', 'files', 'filesystem', 'monit.files', 'line'],
- 'lines': []
- },
- 'fifo': {
- 'options': ['fifos', 'Pipes (fifo)', 'pipes', 'filesystem', 'monit.fifos', 'line'],
- 'lines': []
- },
- 'program': {
- 'options': ['programs', 'Programs statuses', 'programs', 'applications', 'monit.programs', 'line'],
- 'lines': []
- },
- 'process': {
- 'options': ['processes', 'Processes statuses', 'processes', 'applications', 'monit.services', 'line'],
- 'lines': []
- },
- 'process_uptime': {
- 'options': ['processes uptime', 'Processes uptime', 'seconds', 'applications',
- 'monit.process_uptime', 'line', 'hidden'],
- 'lines': []
- },
- 'process_threads': {
- 'options': ['processes threads', 'Processes threads', 'threads', 'applications',
- 'monit.process_threads', 'line'],
- 'lines': []
- },
- 'process_children': {
- 'options': ['processes childrens', 'Child processes', 'children', 'applications',
- 'monit.process_childrens', 'line'],
- 'lines': []
- },
- 'host': {
- 'options': ['hosts', 'Hosts', 'hosts', 'network', 'monit.hosts', 'line'],
- 'lines': []
- },
- 'host_latency': {
- 'options': ['hosts latency', 'Hosts latency', 'milliseconds', 'network', 'monit.host_latency', 'line'],
- 'lines': []
- },
- 'net': {
- 'options': ['interfaces', 'Network interfaces and addresses', 'interfaces', 'network',
- 'monit.networks', 'line'],
- 'lines': []
- },
-}
-
-
-class BaseMonitService(object):
- def __init__(self, typ, name, status, monitor):
- self.type = typ
- self.name = name
- self.status = status
- self.monitor = monitor
-
- def __repr__(self):
- return 'MonitService({0}:{1})'.format(self.type.name, self.name)
-
- def __eq__(self, other):
- if not isinstance(other, BaseMonitService):
- return False
- return self.type == other.type and self.name == other.name
-
- def __ne__(self, other):
- return not self == other
-
- def __hash__(self):
- return hash(repr(self))
-
- def is_running(self):
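-        # status '0' means no error; monitor '1' means the service is actively monitored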
- return self.status == '0' and self.monitor == '1'
-
- def key(self):
- return '{0}_{1}'.format(self.type.name, self.name)
-
- def data(self):
- return {self.key(): int(self.is_running())}
-
-
-class ProcessMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
- super(ProcessMonitService, self).__init__(typ, name, status, monitor)
- self.uptime = None
- self.threads = None
- self.children = None
-
- def __eq__(self, other):
- return super(ProcessMonitService, self).__eq__(other)
-
- def __ne__(self, other):
- return super(ProcessMonitService, self).__ne__(other)
-
- def __hash__(self):
- return super(ProcessMonitService, self).__hash__()
-
- def uptime_key(self):
- return 'process_uptime_{0}'.format(self.name)
-
- def threads_key(self):
- return 'process_threads_{0}'.format(self.name)
-
- def children_key(self):
- return 'process_children_{0}'.format(self.name)
-
- def data(self):
- base_data = super(ProcessMonitService, self).data()
- # skipping bugged metrics with negative uptime (monit before v5.16)
- uptime = self.uptime if self.uptime and int(self.uptime) >= 0 else None
- data = {
- self.uptime_key(): uptime,
- self.threads_key(): self.threads,
- self.children_key(): self.children,
- }
- data.update(base_data)
-
- return data
-
-
-class HostMonitService(BaseMonitService):
- def __init__(self, typ, name, status, monitor):
- super(HostMonitService, self).__init__(typ, name, status, monitor)
- self.latency = None
-
- def __eq__(self, other):
- return super(HostMonitService, self).__eq__(other)
-
- def __ne__(self, other):
- return super(HostMonitService, self).__ne__(other)
-
- def __hash__(self):
- return super(HostMonitService, self).__hash__()
-
- def latency_key(self):
- return 'host_latency_{0}'.format(self.name)
-
- def data(self):
- base_data = super(HostMonitService, self).data()
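-        # responsetime is reported in seconds; store microseconds so the chart's 1000/1000000 scaling shows ms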
- latency = float(self.latency) * 1000000 if self.latency else None
- data = {self.latency_key(): latency}
- data.update(base_data)
-
- return data
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- base_url = self.configuration.get('url', "http://localhost:2812")
- self.url = '{0}/_status?format=xml&level=full'.format(base_url)
- self.active_services = list()
-
- def parse(self, raw):
- try:
- root = ET.fromstring(raw)
- except ET.ParseError:
- self.error("URL {0} didn't return a valid XML page. Please check your settings.".format(self.url))
- return None
- return root
-
- def _get_data(self):
- raw = self._get_raw_data()
- if not raw:
- return None
-
- root = self.parse(raw)
- if root is None:
- return None
-
- services = self.get_services(root)
- if not services:
- return None
-
- if len(self.charts) > 0:
- self.update_charts(services)
-
- data = dict()
-
- for svc in services:
- data.update(svc.data())
-
- return data
-
- def get_services(self, root):
- services = list()
-
- for typ in TYPES:
- if typ == TYPE_SYSTEM:
- self.debug("skipping service from '{0}' category, it's useless in graphs".format(TYPE_SYSTEM.name))
- continue
-
- xpath_query = "./service[@type='{0}']".format(typ.index)
- self.debug('Searching for {0} as {1}'.format(typ.name, xpath_query))
-
- for svc_root in root.findall(xpath_query):
- svc = create_service(svc_root, typ)
- self.debug('=> found {0} with type={1}, status={2}, monitoring={3}'.format(
- svc.name, svc.type.name, svc.status, svc.monitor))
-
- services.append(svc)
-
- return services
-
- def update_charts(self, services):
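-        # diff against the previous poll: drop dimensions for vanished services, add new ones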
- remove = [svc for svc in self.active_services if svc not in services]
- add = [svc for svc in services if svc not in self.active_services]
-
- self.remove_services_from_charts(remove)
- self.add_services_to_charts(add)
-
- self.active_services = services
-
- def add_services_to_charts(self, services):
- for svc in services:
- if svc.type == TYPE_HOST:
- self.charts['host_latency'].add_dimension([svc.latency_key(), svc.name, 'absolute', 1000, 1000000])
- if svc.type == TYPE_PROCESS:
- self.charts['process_uptime'].add_dimension([svc.uptime_key(), svc.name])
- self.charts['process_threads'].add_dimension([svc.threads_key(), svc.name])
- self.charts['process_children'].add_dimension([svc.children_key(), svc.name])
- self.charts[svc.type.name].add_dimension([svc.key(), svc.name])
-
- def remove_services_from_charts(self, services):
- for svc in services:
- if svc.type == TYPE_HOST:
- self.charts['host_latency'].del_dimension(svc.latency_key(), False)
- if svc.type == TYPE_PROCESS:
- self.charts['process_uptime'].del_dimension(svc.uptime_key(), False)
- self.charts['process_threads'].del_dimension(svc.threads_key(), False)
- self.charts['process_children'].del_dimension(svc.children_key(), False)
- self.charts[svc.type.name].del_dimension(svc.key(), False)
-
-
-def create_service(root, typ):
- if typ == TYPE_HOST:
- return create_host_service(root)
- elif typ == TYPE_PROCESS:
- return create_process_service(root)
- return create_base_service(root, typ)
-
-
-def create_host_service(root):
- svc = HostMonitService(
- TYPE_HOST,
- root.find('name').text,
- root.find('status').text,
- root.find('monitor').text,
- )
-
- latency = root.find('./icmp/responsetime')
- if latency is not None:
- svc.latency = latency.text
-
- return svc
-
-
-def create_process_service(root):
- svc = ProcessMonitService(
- TYPE_PROCESS,
- root.find('name').text,
- root.find('status').text,
- root.find('monitor').text,
- )
-
- uptime = root.find('uptime')
- if uptime is not None:
- svc.uptime = uptime.text
-
- threads = root.find('threads')
- if threads is not None:
- svc.threads = threads.text
-
- children = root.find('children')
- if children is not None:
- svc.children = children.text
-
- return svc
-
-
-def create_base_service(root, typ):
- return BaseMonitService(
- typ,
- root.find('name').text,
- root.find('status').text,
- root.find('monitor').text,
- )
diff --git a/src/collectors/python.d.plugin/monit/monit.conf b/src/collectors/python.d.plugin/monit/monit.conf
deleted file mode 100644
index 9a3fb6938..000000000
--- a/src/collectors/python.d.plugin/monit/monit.conf
+++ /dev/null
@@ -1,86 +0,0 @@
-# netdata python.d.plugin configuration for monit
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, this plugin also supports the following:
-#
-# url: 'URL' # the URL to fetch Monit's status page
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# Example
-#
-# local:
-# name : 'Local Monit'
-# url : 'http://localhost:2812'
-#
-# "local" will show up in Netdata logs. "Reverse Proxy" will show up in the menu
-# in the monit section.
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost:2812'
diff --git a/src/collectors/python.d.plugin/nsd/README.md b/src/collectors/python.d.plugin/nsd/README.md
deleted file mode 120000
index 59fcfe491..000000000
--- a/src/collectors/python.d.plugin/nsd/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/name_server_daemon.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md b/src/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
deleted file mode 100644
index 357812d3d..000000000
--- a/src/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
+++ /dev/null
@@ -1,199 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/nsd/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/nsd/metadata.yaml"
-sidebar_label: "Name Server Daemon"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/DNS and DHCP Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Name Server Daemon
-
-
-<img src="https://netdata.cloud/img/nsd.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: nsd
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors NSD statistics like queries, zones, protocols, query types and more.
-
-
-It uses the `nsd-control stats_noreset` command to gather metrics.
-
-
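-The command prints plain `key=value` lines. A minimal parsing sketch
-(assumptions: `nsd-control` is on the PATH and the caller may run it):
-
-```python
-# Run 'nsd-control stats_noreset' and collect its key=value pairs.
-import re
-import subprocess
-
-REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
-raw = subprocess.check_output(['nsd-control', 'stats_noreset'], text=True)
-stats = {key: int(value) for key, value in REGEX.findall(raw)}
-print(stats.get('num.queries'))
-```
-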
-This collector is supported on all platforms.
-
-This collector only supports collecting metrics from a single instance of this integration.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Name Server Daemon instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| nsd.queries | queries | queries/s |
-| nsd.zones | master, slave | zones |
-| nsd.protocols | udp, udp6, tcp, tcp6 | queries/s |
-| nsd.type | A, NS, CNAME, SOA, PTR, HINFO, MX, NAPTR, TXT, AAAA, SRV, ANY | queries/s |
-| nsd.transfer | NOTIFY, AXFR | queries/s |
-| nsd.rcode | NOERROR, FORMERR, SERVFAIL, NXDOMAIN, NOTIMP, REFUSED, YXDOMAIN | queries/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### NSD version
-
-The version of `nsd` must be 4.0+.
-
-
-#### Provide Netdata the permissions to run the command
-
-Netdata must have permissions to run the `nsd-control stats_noreset` command.
-
-You can:
-
-- Add "netdata" user to "nsd" group:
- ```
- usermod -aG nsd netdata
- ```
-- Add Netdata to sudoers
- 1. Edit the sudoers file:
- ```
- visudo -f /etc/sudoers.d/netdata
- ```
- 2. Add the entry:
- ```
- Defaults:netdata !requiretty
- netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
- ```
-
- > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/nsd.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/nsd.conf
-```
-#### Options
-
-This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every. They can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 30 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| command | The command to run | nsd-control stats_noreset | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-local:
- name: 'nsd_local'
- command: 'nsd-control stats_noreset'
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `nsd` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin nsd debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/nsd/metadata.yaml b/src/collectors/python.d.plugin/nsd/metadata.yaml
deleted file mode 100644
index f5e2c46b0..000000000
--- a/src/collectors/python.d.plugin/nsd/metadata.yaml
+++ /dev/null
@@ -1,201 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: nsd
- monitored_instance:
- name: Name Server Daemon
- link: https://nsd.docs.nlnetlabs.nl/en/latest/#
- categories:
- - data-collection.dns-and-dhcp-servers
- icon_filename: "nsd.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - nsd
- - name server daemon
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors NSD statistics like queries, zones, protocols, query types and more.
- method_description: |
- It uses the `nsd-control stats_noreset` command to gather metrics.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: false
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: If permissions are satisfied, the collector will be able to run `nsd-control stats_noreset`, thus collecting metrics.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: NSD version
- description: |
- The version of `nsd` must be 4.0+.
- - title: Provide Netdata the permissions to run the command
- description: |
- Netdata must have permissions to run the `nsd-control stats_noreset` command.
-
- You can:
-
- - Add "netdata" user to "nsd" group:
- ```
- usermod -aG nsd netdata
- ```
- - Add Netdata to sudoers
- 1. Edit the sudoers file:
- ```
- visudo -f /etc/sudoers.d/netdata
- ```
- 2. Add the entry:
- ```
- Defaults:netdata !requiretty
- netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
- ```
-
- > Note that you will need to set the `command` option to `sudo /usr/sbin/nsd-control stats_noreset` if you use this method.
-
- configuration:
- file:
- name: "python.d/nsd.conf"
- options:
- description: |
-          This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
-          The following options can be defined globally: priority, penalty, autodetection_retry, update_every. They can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 30
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed
- running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: command
- description: The command to run
- default_value: "nsd-control stats_noreset"
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic configuration example.
- folding:
- enabled: false
- config: |
- local:
- name: 'nsd_local'
- command: 'nsd-control stats_noreset'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: nsd.queries
- description: queries
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: queries
- - name: nsd.zones
- description: zones
- unit: "zones"
- chart_type: stacked
- dimensions:
- - name: master
- - name: slave
- - name: nsd.protocols
- description: protocol
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: udp
- - name: udp6
- - name: tcp
- - name: tcp6
- - name: nsd.type
- description: query type
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: A
- - name: NS
- - name: CNAME
- - name: SOA
- - name: PTR
- - name: HINFO
- - name: MX
- - name: NAPTR
- - name: TXT
- - name: AAAA
- - name: SRV
- - name: ANY
- - name: nsd.transfer
- description: transfer
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: NOTIFY
- - name: AXFR
- - name: nsd.rcode
- description: return code
- unit: "queries/s"
- chart_type: stacked
- dimensions:
- - name: NOERROR
- - name: FORMERR
- - name: SERVFAIL
- - name: NXDOMAIN
- - name: NOTIMP
- - name: REFUSED
- - name: YXDOMAIN
diff --git a/src/collectors/python.d.plugin/nsd/nsd.chart.py b/src/collectors/python.d.plugin/nsd/nsd.chart.py
deleted file mode 100644
index 6f9b2cec8..000000000
--- a/src/collectors/python.d.plugin/nsd/nsd.chart.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: NSD `nsd-control stats_noreset` netdata python.d module
-# Author: <383c57 at gmail.com>
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-update_every = 30
-
-NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
-REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
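-# `nsd-control stats_noreset` prints one counter per line, e.g.
-# `num.queries=123` or `num.type.A=42`; dots in the keys are replaced
-# with underscores below to match the chart dimension ids (e.g. `num_type_A`).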
-
-ORDER = [
- 'queries',
- 'zones',
- 'protocol',
- 'type',
- 'transfer',
- 'rcode',
-]
-
-CHARTS = {
- 'queries': {
- 'options': [None, 'queries', 'queries/s', 'queries', 'nsd.queries', 'line'],
- 'lines': [
- ['num_queries', 'queries', 'incremental']
- ]
- },
- 'zones': {
- 'options': [None, 'zones', 'zones', 'zones', 'nsd.zones', 'stacked'],
- 'lines': [
- ['zone_master', 'master', 'absolute'],
- ['zone_slave', 'slave', 'absolute']
- ]
- },
- 'protocol': {
- 'options': [None, 'protocol', 'queries/s', 'protocol', 'nsd.protocols', 'stacked'],
- 'lines': [
- ['num_udp', 'udp', 'incremental'],
- ['num_udp6', 'udp6', 'incremental'],
- ['num_tcp', 'tcp', 'incremental'],
- ['num_tcp6', 'tcp6', 'incremental']
- ]
- },
- 'type': {
- 'options': [None, 'query type', 'queries/s', 'query type', 'nsd.type', 'stacked'],
- 'lines': [
- ['num_type_A', 'A', 'incremental'],
- ['num_type_NS', 'NS', 'incremental'],
- ['num_type_CNAME', 'CNAME', 'incremental'],
- ['num_type_SOA', 'SOA', 'incremental'],
- ['num_type_PTR', 'PTR', 'incremental'],
- ['num_type_HINFO', 'HINFO', 'incremental'],
- ['num_type_MX', 'MX', 'incremental'],
- ['num_type_NAPTR', 'NAPTR', 'incremental'],
- ['num_type_TXT', 'TXT', 'incremental'],
- ['num_type_AAAA', 'AAAA', 'incremental'],
- ['num_type_SRV', 'SRV', 'incremental'],
- ['num_type_TYPE255', 'ANY', 'incremental']
- ]
- },
- 'transfer': {
- 'options': [None, 'transfer', 'queries/s', 'transfer', 'nsd.transfer', 'stacked'],
- 'lines': [
- ['num_opcode_NOTIFY', 'NOTIFY', 'incremental'],
- ['num_type_TYPE252', 'AXFR', 'incremental']
- ]
- },
- 'rcode': {
- 'options': [None, 'return code', 'queries/s', 'return code', 'nsd.rcode', 'stacked'],
- 'lines': [
- ['num_rcode_NOERROR', 'NOERROR', 'incremental'],
- ['num_rcode_FORMERR', 'FORMERR', 'incremental'],
- ['num_rcode_SERVFAIL', 'SERVFAIL', 'incremental'],
- ['num_rcode_NXDOMAIN', 'NXDOMAIN', 'incremental'],
- ['num_rcode_NOTIMP', 'NOTIMP', 'incremental'],
- ['num_rcode_REFUSED', 'REFUSED', 'incremental'],
- ['num_rcode_YXDOMAIN', 'YXDOMAIN', 'incremental']
- ]
- }
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.command = NSD_CONTROL_COMMAND
-
- def _get_data(self):
- lines = self._get_raw_data()
- if not lines:
- return None
-
- stats = dict(
- (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
- )
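-        # these counters may be absent from the output until a matching
-        # query has been seen, so default them to 0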
- stats.setdefault('num_opcode_NOTIFY', 0)
- stats.setdefault('num_type_TYPE252', 0)
- stats.setdefault('num_type_TYPE255', 0)
-
- return stats
diff --git a/src/collectors/python.d.plugin/nsd/nsd.conf b/src/collectors/python.d.plugin/nsd/nsd.conf
deleted file mode 100644
index 77a8a3177..000000000
--- a/src/collectors/python.d.plugin/nsd/nsd.conf
+++ /dev/null
@@ -1,91 +0,0 @@
-# netdata python.d.plugin configuration for nsd
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# nsd-control is slow, so once every 30 seconds
-# update_every: 30
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, nsd also supports the following:
-#
-# command: 'nsd-control stats_noreset' # the command to run
-#
-
-# ----------------------------------------------------------------------
-# IMPORTANT Information
-#
-# Netdata must have permissions to run `nsd-control stats_noreset` command
-#
-# - Example-1 (use "sudo")
-# 1. sudoers (e.g. visudo -f /etc/sudoers.d/netdata)
-# Defaults:netdata !requiretty
-# netdata ALL=(ALL) NOPASSWD: /usr/sbin/nsd-control stats_noreset
-# 2. etc/netdata/python.d/nsd.conf
-# local:
-# update_every: 30
-# command: 'sudo /usr/sbin/nsd-control stats_noreset'
-#
-# - Example-2 (add "netdata" user to "nsd" group)
-# usermod -aG nsd netdata
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-local:
- update_every: 30
- command: 'nsd-control stats_noreset'
diff --git a/src/collectors/python.d.plugin/nvidia_smi/README.md b/src/collectors/python.d.plugin/nvidia_smi/README.md
deleted file mode 100644
index 240b65af3..000000000
--- a/src/collectors/python.d.plugin/nvidia_smi/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-<!--
-title: "Nvidia GPU monitoring with Netdata"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/nvidia_smi/README.md"
-sidebar_label: "nvidia_smi-python.d.plugin"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/Devices"
--->
-
-# Nvidia GPU collector
-
-Monitors performance metrics (memory usage, fan speed, pcie bandwidth utilization, temperature, etc.) using `nvidia-smi` cli tool.
-
-## Requirements
-
-- The `nvidia-smi` tool must be installed, and your NVIDIA GPU(s) must support it. Support mostly covers the newer high-end models used for AI/ML and crypto, plus the professional range; read more about [nvidia_smi](https://developer.nvidia.com/nvidia-system-management-interface).
-- Enable this plugin, as it's disabled by default due to minor performance issues:
- ```bash
- cd /etc/netdata # Replace this path with your Netdata config directory, if different
- sudo ./edit-config python.d.conf
- ```
- Remove the '#' before nvidia_smi so it reads: `nvidia_smi: yes`.
-- On some systems, when the GPU is idle, the `nvidia-smi` tool unloads, which adds latency when it is next queried. If your GPUs run under constant workload, this is unlikely to be an issue.
-
-If using Docker, see [Netdata Docker container with NVIDIA GPUs monitoring](https://github.com/netdata/netdata/tree/master/packaging/docker#with-nvidia-gpus-monitoring).
-
-## Charts
-
-It produces the following charts:
-
-- PCI Express Bandwidth Utilization in `KiB/s`
-- Fan Speed in `percentage`
-- GPU Utilization in `percentage`
-- Memory Bandwidth Utilization in `percentage`
-- Encoder/Decoder Utilization in `percentage`
-- Memory Usage in `MiB`
-- Temperature in `celsius`
-- Clock Frequencies in `MHz`
-- Power Utilization in `Watts`
-- Memory Used by Each Process in `MiB`
-- Memory Used by Each User in `MiB`
-- Number of Users on GPU in `num`
-
-## Configuration
-
-Edit the `python.d/nvidia_smi.conf` configuration file using `edit-config` from the Netdata [config
-directory](/docs/netdata-agent/configuration/README.md), which is typically at `/etc/netdata`.
-
-```bash
-cd /etc/netdata # Replace this path with your Netdata config directory, if different
-sudo ./edit-config python.d/nvidia_smi.conf
-```
-
-Sample:
-
-```yaml
-loop_mode : yes
-poll_seconds : 1
-exclude_zero_memory_users : yes
-```
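-
-`loop_mode` keeps `nvidia-smi` running in a separate thread, `poll_seconds` sets how often it is polled in that mode, and `exclude_zero_memory_users` skips users whose processes hold no GPU memory.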
-
-
-### Troubleshooting
-
-To troubleshoot issues with the `nvidia_smi` module, run the `python.d.plugin` with the debug option enabled. The
-output will give you the output of the data collection job or error messages on why the collector isn't working.
-
-First, navigate to your plugins directory, usually they are located under `/usr/libexec/netdata/plugins.d/`. If that's
-not the case on your system, open `netdata.conf` and look for the setting `plugins directory`. Once you're in the
-plugin's directory, switch to the `netdata` user.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash netdata
-```
-
-Now you can manually run the `nvidia_smi` module in debug mode:
-
-```bash
-./python.d.plugin nvidia_smi debug trace
-```
diff --git a/src/collectors/python.d.plugin/nvidia_smi/metadata.yaml b/src/collectors/python.d.plugin/nvidia_smi/metadata.yaml
deleted file mode 100644
index 0b049d31b..000000000
--- a/src/collectors/python.d.plugin/nvidia_smi/metadata.yaml
+++ /dev/null
@@ -1,166 +0,0 @@
-# This collector will not appear in documentation, as the go version is preferred,
-# /src/go/collectors/go.d.plugin/modules/nvidia_smi/README.md
-#
-# meta:
-# plugin_name: python.d.plugin
-# module_name: nvidia_smi
-# monitored_instance:
-# name: python.d nvidia_smi
-# link: ''
-# categories: []
-# icon_filename: ''
-# related_resources:
-# integrations:
-# list: []
-# info_provided_to_referring_integrations:
-# description: ''
-# keywords: []
-# most_popular: false
-# overview:
-# data_collection:
-# metrics_description: ''
-# method_description: ''
-# supported_platforms:
-# include: []
-# exclude: []
-# multi_instance: true
-# additional_permissions:
-# description: ''
-# default_behavior:
-# auto_detection:
-# description: ''
-# limits:
-# description: ''
-# performance_impact:
-# description: ''
-# setup:
-# prerequisites:
-# list: []
-# configuration:
-# file:
-# name: ''
-# description: ''
-# options:
-# description: ''
-# folding:
-# title: ''
-# enabled: true
-# list: []
-# examples:
-# folding:
-# enabled: true
-# title: ''
-# list: []
-# troubleshooting:
-# problems:
-# list: []
-# alerts: []
-# metrics:
-# folding:
-# title: Metrics
-# enabled: false
-# description: ""
-# availability: []
-# scopes:
-# - name: GPU
-# description: ""
-# labels: []
-# metrics:
-# - name: nvidia_smi.pci_bandwidth
-# description: PCI Express Bandwidth Utilization
-# unit: "KiB/s"
-# chart_type: area
-# dimensions:
-# - name: rx
-# - name: tx
-# - name: nvidia_smi.pci_bandwidth_percent
-# description: PCI Express Bandwidth Percent
-# unit: "percentage"
-# chart_type: area
-# dimensions:
-# - name: rx_percent
-# - name: tx_percent
-# - name: nvidia_smi.fan_speed
-# description: Fan Speed
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: speed
-# - name: nvidia_smi.gpu_utilization
-# description: GPU Utilization
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: utilization
-# - name: nvidia_smi.mem_utilization
-# description: Memory Bandwidth Utilization
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: utilization
-# - name: nvidia_smi.encoder_utilization
-# description: Encoder/Decoder Utilization
-# unit: "percentage"
-# chart_type: line
-# dimensions:
-# - name: encoder
-# - name: decoder
-# - name: nvidia_smi.memory_allocated
-# description: Memory Usage
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: free
-# - name: used
-# - name: nvidia_smi.bar1_memory_usage
-# description: Bar1 Memory Usage
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: free
-# - name: used
-# - name: nvidia_smi.temperature
-# description: Temperature
-# unit: "celsius"
-# chart_type: line
-# dimensions:
-# - name: temp
-# - name: nvidia_smi.clocks
-# description: Clock Frequencies
-# unit: "MHz"
-# chart_type: line
-# dimensions:
-# - name: graphics
-# - name: video
-# - name: sm
-# - name: mem
-# - name: nvidia_smi.power
-# description: Power Utilization
-# unit: "Watts"
-# chart_type: line
-# dimensions:
-# - name: power
-# - name: nvidia_smi.power_state
-# description: Power State
-# unit: "state"
-# chart_type: line
-# dimensions:
-# - name: a dimension per {power_state}
-# - name: nvidia_smi.processes_mem
-# description: Memory Used by Each Process
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: a dimension per process
-# - name: nvidia_smi.user_mem
-# description: Memory Used by Each User
-# unit: "MiB"
-# chart_type: stacked
-# dimensions:
-# - name: a dimension per user
-# - name: nvidia_smi.user_num
-#        description: Number of Users on GPU
-# unit: "num"
-# chart_type: line
-# dimensions:
-# - name: users
diff --git a/src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py b/src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
deleted file mode 100644
index 556a61435..000000000
--- a/src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.chart.py
+++ /dev/null
@@ -1,651 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: nvidia-smi netdata python.d module
-# Original Author: Steven Noonan (tycho)
-# Author: Ilya Mashchenko (ilyam8)
-# User Memory Stat Author: Guido Scatena (scatenag)
-
-import os
-import pwd
-import subprocess
-import threading
-import xml.etree.ElementTree as et
-
-from bases.FrameworkServices.SimpleService import SimpleService
-from bases.collection import find_binary
-
-disabled_by_default = True
-
-NVIDIA_SMI = 'nvidia-smi'
-
-NOT_AVAILABLE = 'N/A'
-
-EMPTY_ROW = ''
-EMPTY_ROW_LIMIT = 500
-POLLER_BREAK_ROW = '</nvidia_smi_log>'
-
-PCI_BANDWIDTH = 'pci_bandwidth'
-PCI_BANDWIDTH_PERCENT = 'pci_bandwidth_percent'
-FAN_SPEED = 'fan_speed'
-GPU_UTIL = 'gpu_utilization'
-MEM_UTIL = 'mem_utilization'
-ENCODER_UTIL = 'encoder_utilization'
-MEM_USAGE = 'mem_usage'
-BAR_USAGE = 'bar1_mem_usage'
-TEMPERATURE = 'temperature'
-CLOCKS = 'clocks'
-POWER = 'power'
-POWER_STATE = 'power_state'
-PROCESSES_MEM = 'processes_mem'
-USER_MEM = 'user_mem'
-USER_NUM = 'user_num'
-
-ORDER = [
- PCI_BANDWIDTH,
- PCI_BANDWIDTH_PERCENT,
- FAN_SPEED,
- GPU_UTIL,
- MEM_UTIL,
- ENCODER_UTIL,
- MEM_USAGE,
- BAR_USAGE,
- TEMPERATURE,
- CLOCKS,
- POWER,
- POWER_STATE,
- PROCESSES_MEM,
- USER_MEM,
- USER_NUM,
-]
-
-# https://docs.nvidia.com/gameworks/content/gameworkslibrary/coresdk/nvapi/group__gpupstate.html
-POWER_STATES = ['P' + str(i) for i in range(0, 16)]
-
-# PCI Transfer data rate in gigabits per second (Gb/s) per generation
-PCI_SPEED = {
- "1": 2.5,
- "2": 5,
- "3": 8,
- "4": 16,
- "5": 32
-}
-# PCIe encoding overhead per generation (8b/10b for gen 1-2, 128b/130b for gen 3+)
-PCI_ENCODING = {
- "1": 2 / 10,
- "2": 2 / 10,
- "3": 2 / 130,
- "4": 2 / 130,
- "5": 2 / 130
-}
-
-
-def gpu_charts(gpu):
- fam = gpu.full_name()
-
- charts = {
- PCI_BANDWIDTH: {
- 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
- 'lines': [
- ['rx_util', 'rx', 'absolute', 1, 1],
- ['tx_util', 'tx', 'absolute', 1, -1],
- ]
- },
- PCI_BANDWIDTH_PERCENT: {
- 'options': [None, 'PCI Express Bandwidth Percent', 'percentage', fam, 'nvidia_smi.pci_bandwidth_percent',
- 'area'],
- 'lines': [
- ['rx_util_percent', 'rx_percent'],
- ['tx_util_percent', 'tx_percent'],
- ]
- },
- FAN_SPEED: {
- 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
- 'lines': [
- ['fan_speed', 'speed'],
- ]
- },
- GPU_UTIL: {
- 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
- 'lines': [
- ['gpu_util', 'utilization'],
- ]
- },
- MEM_UTIL: {
- 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
- 'lines': [
- ['memory_util', 'utilization'],
- ]
- },
- ENCODER_UTIL: {
- 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization',
- 'line'],
- 'lines': [
- ['encoder_util', 'encoder'],
- ['decoder_util', 'decoder'],
- ]
- },
- MEM_USAGE: {
- 'options': [None, 'Memory Usage', 'MiB', fam, 'nvidia_smi.memory_allocated', 'stacked'],
- 'lines': [
- ['fb_memory_free', 'free'],
- ['fb_memory_used', 'used'],
- ]
- },
- BAR_USAGE: {
- 'options': [None, 'Bar1 Memory Usage', 'MiB', fam, 'nvidia_smi.bar1_memory_usage', 'stacked'],
- 'lines': [
- ['bar1_memory_free', 'free'],
- ['bar1_memory_used', 'used'],
- ]
- },
- TEMPERATURE: {
- 'options': [None, 'Temperature', 'celsius', fam, 'nvidia_smi.temperature', 'line'],
- 'lines': [
- ['gpu_temp', 'temp'],
- ]
- },
- CLOCKS: {
- 'options': [None, 'Clock Frequencies', 'MHz', fam, 'nvidia_smi.clocks', 'line'],
- 'lines': [
- ['graphics_clock', 'graphics'],
- ['video_clock', 'video'],
- ['sm_clock', 'sm'],
- ['mem_clock', 'mem'],
- ]
- },
- POWER: {
- 'options': [None, 'Power Utilization', 'Watts', fam, 'nvidia_smi.power', 'line'],
- 'lines': [
- ['power_draw', 'power', 'absolute', 1, 100],
- ]
- },
- POWER_STATE: {
- 'options': [None, 'Power State', 'state', fam, 'nvidia_smi.power_state', 'line'],
- 'lines': [['power_state_' + v.lower(), v, 'absolute'] for v in POWER_STATES]
- },
- PROCESSES_MEM: {
- 'options': [None, 'Memory Used by Each Process', 'MiB', fam, 'nvidia_smi.processes_mem', 'stacked'],
- 'lines': []
- },
- USER_MEM: {
- 'options': [None, 'Memory Used by Each User', 'MiB', fam, 'nvidia_smi.user_mem', 'stacked'],
- 'lines': []
- },
- USER_NUM: {
-            'options': [None, 'Number of Users on GPU', 'num', fam, 'nvidia_smi.user_num', 'line'],
- 'lines': [
- ['user_num', 'users'],
- ]
- },
- }
-
- idx = gpu.num
-
- order = ['gpu{0}_{1}'.format(idx, v) for v in ORDER]
- charts = dict(('gpu{0}_{1}'.format(idx, k), v) for k, v in charts.items())
-
- for chart in charts.values():
- for line in chart['lines']:
- line[0] = 'gpu{0}_{1}'.format(idx, line[0])
-
- return order, charts
-
-
-class NvidiaSMI:
- def __init__(self):
- self.command = find_binary(NVIDIA_SMI)
- self.active_proc = None
-
- def run_once(self):
- proc = subprocess.Popen([self.command, '-x', '-q'], stdout=subprocess.PIPE)
- stdout, _ = proc.communicate()
- return stdout
-
- def run_loop(self, interval):
- if self.active_proc:
- self.kill()
- proc = subprocess.Popen([self.command, '-x', '-q', '-l', str(interval)], stdout=subprocess.PIPE)
- self.active_proc = proc
- return proc.stdout
-
- def kill(self):
- if self.active_proc:
- self.active_proc.kill()
- self.active_proc = None
-
-
-class NvidiaSMIPoller(threading.Thread):
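-    # Runs `nvidia-smi -x -q -l <interval>` in a background thread and keeps
-    # the most recent complete XML document (delimited by </nvidia_smi_log>)
-    # for the service to read via data().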
- def __init__(self, poll_interval):
- threading.Thread.__init__(self)
- self.daemon = True
-
- self.smi = NvidiaSMI()
- self.interval = poll_interval
-
- self.lock = threading.RLock()
- self.last_data = str()
- self.exit = False
- self.empty_rows = 0
- self.rows = list()
-
- def has_smi(self):
- return bool(self.smi.command)
-
- def run_once(self):
- return self.smi.run_once()
-
- def run(self):
- out = self.smi.run_loop(self.interval)
-
- for row in out:
- if self.exit or self.empty_rows > EMPTY_ROW_LIMIT:
- break
- self.process_row(row)
- self.smi.kill()
-
- def process_row(self, row):
- row = row.decode()
- self.empty_rows += (row == EMPTY_ROW)
- self.rows.append(row)
-
- if POLLER_BREAK_ROW in row:
- self.lock.acquire()
- self.last_data = '\n'.join(self.rows)
- self.lock.release()
-
- self.rows = list()
- self.empty_rows = 0
-
- def is_started(self):
- return self.ident is not None
-
- def shutdown(self):
- self.exit = True
-
- def data(self):
- self.lock.acquire()
- data = self.last_data
- self.lock.release()
- return data
-
-
-def handle_attr_error(method):
- def on_call(*args, **kwargs):
- try:
- return method(*args, **kwargs)
- except AttributeError:
- return None
-
- return on_call
-
-
-def handle_value_error(method):
- def on_call(*args, **kwargs):
- try:
- return method(*args, **kwargs)
- except ValueError:
- return None
-
- return on_call
-
-
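-# When Netdata runs inside a Docker container, NETDATA_HOST_PREFIX points to
-# where the host's filesystem is mounted, so /etc/passwd and /proc are read
-# from the host rather than from inside the container.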
-HOST_PREFIX = os.getenv('NETDATA_HOST_PREFIX')
-ETC_PASSWD_PATH = '/etc/passwd'
-PROC_PATH = '/proc'
-
-IS_INSIDE_DOCKER = False
-
-if HOST_PREFIX:
- ETC_PASSWD_PATH = os.path.join(HOST_PREFIX, ETC_PASSWD_PATH[1:])
- PROC_PATH = os.path.join(HOST_PREFIX, PROC_PATH[1:])
- IS_INSIDE_DOCKER = True
-
-
-def read_passwd_file():
- data = dict()
- with open(ETC_PASSWD_PATH, 'r') as f:
- for line in f:
- line = line.strip()
- if line.startswith("#"):
- continue
- fields = line.split(":")
- # name, passwd, uid, gid, comment, home_dir, shell
- if len(fields) != 7:
- continue
-            # uid, gid
- fields[2], fields[3] = int(fields[2]), int(fields[3])
- data[fields[2]] = fields
- return data
-
-
-def read_passwd_file_safe():
- try:
- if IS_INSIDE_DOCKER:
- return read_passwd_file()
- return dict((k[2], k) for k in pwd.getpwall())
- except (OSError, IOError):
- return dict()
-
-
-def get_username_by_pid_safe(pid, passwd_file):
- path = os.path.join(PROC_PATH, pid)
- try:
- uid = os.stat(path).st_uid
- except (OSError, IOError):
- return ''
- try:
- if IS_INSIDE_DOCKER:
- return passwd_file[uid][0]
- return pwd.getpwuid(uid)[0]
- except KeyError:
- return str(uid)
-
-
-class GPU:
- def __init__(self, num, root, exclude_zero_memory_users=False):
- self.num = num
- self.root = root
- self.exclude_zero_memory_users = exclude_zero_memory_users
-
- def id(self):
- return self.root.get('id')
-
- def name(self):
- return self.root.find('product_name').text
-
- def full_name(self):
- return 'gpu{0} {1}'.format(self.num, self.name())
-
- @handle_attr_error
- def pci_link_gen(self):
- return self.root.find('pci').find('pci_gpu_link_info').find('pcie_gen').find('max_link_gen').text
-
- @handle_attr_error
- def pci_link_width(self):
- info = self.root.find('pci').find('pci_gpu_link_info')
- return info.find('link_widths').find('max_link_width').text.split('x')[0]
-
- def pci_bw_max(self):
- link_gen = self.pci_link_gen()
- link_width = int(self.pci_link_width())
- if link_gen not in PCI_SPEED or link_gen not in PCI_ENCODING or not link_width:
- return None
- # Maximum PCIe Bandwidth = SPEED * WIDTH * (1 - ENCODING) - 1Gb/s.
- # see details https://enterprise-support.nvidia.com/s/article/understanding-pcie-configuration-for-maximum-performance
- # return max bandwidth in kilobytes per second (kB/s)
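-        # e.g. gen3 x16: (8 * 16 * (1 - 2/130) - 1) Gb/s ~ 125 Gb/s ~ 15.6e6 kB/s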
- return (PCI_SPEED[link_gen] * link_width * (1 - PCI_ENCODING[link_gen]) - 1) * 1000 * 1000 / 8
-
- @handle_attr_error
- def rx_util(self):
- return self.root.find('pci').find('rx_util').text.split()[0]
-
- @handle_attr_error
- def tx_util(self):
- return self.root.find('pci').find('tx_util').text.split()[0]
-
- @handle_attr_error
- def fan_speed(self):
- return self.root.find('fan_speed').text.split()[0]
-
- @handle_attr_error
- def gpu_util(self):
- return self.root.find('utilization').find('gpu_util').text.split()[0]
-
- @handle_attr_error
- def memory_util(self):
- return self.root.find('utilization').find('memory_util').text.split()[0]
-
- @handle_attr_error
- def encoder_util(self):
- return self.root.find('utilization').find('encoder_util').text.split()[0]
-
- @handle_attr_error
- def decoder_util(self):
- return self.root.find('utilization').find('decoder_util').text.split()[0]
-
- @handle_attr_error
- def fb_memory_used(self):
- return self.root.find('fb_memory_usage').find('used').text.split()[0]
-
- @handle_attr_error
- def fb_memory_free(self):
- return self.root.find('fb_memory_usage').find('free').text.split()[0]
-
- @handle_attr_error
- def bar1_memory_used(self):
- return self.root.find('bar1_memory_usage').find('used').text.split()[0]
-
- @handle_attr_error
- def bar1_memory_free(self):
- return self.root.find('bar1_memory_usage').find('free').text.split()[0]
-
- @handle_attr_error
- def temperature(self):
- return self.root.find('temperature').find('gpu_temp').text.split()[0]
-
- @handle_attr_error
- def graphics_clock(self):
- return self.root.find('clocks').find('graphics_clock').text.split()[0]
-
- @handle_attr_error
- def video_clock(self):
- return self.root.find('clocks').find('video_clock').text.split()[0]
-
- @handle_attr_error
- def sm_clock(self):
- return self.root.find('clocks').find('sm_clock').text.split()[0]
-
- @handle_attr_error
- def mem_clock(self):
- return self.root.find('clocks').find('mem_clock').text.split()[0]
-
- @handle_attr_error
- def power_readings(self):
- elem = self.root.find('power_readings')
- return elem if elem else self.root.find('gpu_power_readings')
-
- @handle_attr_error
- def power_state(self):
- return str(self.power_readings().find('power_state').text.split()[0])
-
- @handle_value_error
- @handle_attr_error
- def power_draw(self):
- return float(self.power_readings().find('power_draw').text.split()[0]) * 100
-
- @handle_attr_error
- def processes(self):
- processes_info = self.root.find('processes').findall('process_info')
- if not processes_info:
- return list()
-
- passwd_file = read_passwd_file_safe()
- processes = list()
-
- for info in processes_info:
- pid = info.find('pid').text
- processes.append({
- 'pid': int(pid),
- 'process_name': info.find('process_name').text,
- 'used_memory': int(info.find('used_memory').text.split()[0]),
- 'username': get_username_by_pid_safe(pid, passwd_file),
- })
- return processes
-
- def data(self):
- data = {
- 'rx_util': self.rx_util(),
- 'tx_util': self.tx_util(),
- 'fan_speed': self.fan_speed(),
- 'gpu_util': self.gpu_util(),
- 'memory_util': self.memory_util(),
- 'encoder_util': self.encoder_util(),
- 'decoder_util': self.decoder_util(),
- 'fb_memory_used': self.fb_memory_used(),
- 'fb_memory_free': self.fb_memory_free(),
- 'bar1_memory_used': self.bar1_memory_used(),
- 'bar1_memory_free': self.bar1_memory_free(),
- 'gpu_temp': self.temperature(),
- 'graphics_clock': self.graphics_clock(),
- 'video_clock': self.video_clock(),
- 'sm_clock': self.sm_clock(),
- 'mem_clock': self.mem_clock(),
- 'power_draw': self.power_draw(),
- }
-
- if self.rx_util() != NOT_AVAILABLE and self.tx_util() != NOT_AVAILABLE:
- pci_bw_max = self.pci_bw_max()
- if not pci_bw_max:
- data['rx_util_percent'] = 0
- data['tx_util_percent'] = 0
- else:
-                data['rx_util_percent'] = str(int(int(self.rx_util()) * 100 / pci_bw_max))
-                data['tx_util_percent'] = str(int(int(self.tx_util()) * 100 / pci_bw_max))
-
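-        # one-hot encode the current performance state (P0..P15)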
- for v in POWER_STATES:
- data['power_state_' + v.lower()] = 0
- p_state = self.power_state()
- if p_state:
- data['power_state_' + p_state.lower()] = 1
-
- processes = self.processes() or []
- users = set()
- for p in processes:
- data['process_mem_{0}'.format(p['pid'])] = p['used_memory']
- if p['username']:
- if self.exclude_zero_memory_users and p['used_memory'] == 0:
- continue
- users.add(p['username'])
- key = 'user_mem_{0}'.format(p['username'])
- if key in data:
- data[key] += p['used_memory']
- else:
- data[key] = p['used_memory']
- data['user_num'] = len(users)
-
- return dict(('gpu{0}_{1}'.format(self.num, k), v) for k, v in data.items())
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.order = list()
- self.definitions = dict()
- self.loop_mode = configuration.get('loop_mode', True)
- poll = int(configuration.get('poll_seconds', self.get_update_every()))
- self.exclude_zero_memory_users = configuration.get('exclude_zero_memory_users', False)
- self.poller = NvidiaSMIPoller(poll)
-
- def get_data_loop_mode(self):
- if not self.poller.is_started():
- self.poller.start()
-
- if not self.poller.is_alive():
- self.debug('poller is off')
- return None
-
- return self.poller.data()
-
- def get_data_normal_mode(self):
- return self.poller.run_once()
-
- def get_data(self):
- if self.loop_mode:
- last_data = self.get_data_loop_mode()
- else:
- last_data = self.get_data_normal_mode()
-
- if not last_data:
- return None
-
- parsed = self.parse_xml(last_data)
- if parsed is None:
- return None
-
- data = dict()
- for idx, root in enumerate(parsed.findall('gpu')):
- gpu = GPU(idx, root, self.exclude_zero_memory_users)
- gpu_data = gpu.data()
- # self.debug(gpu_data)
- gpu_data = dict((k, v) for k, v in gpu_data.items() if is_gpu_data_value_valid(v))
- data.update(gpu_data)
- self.update_processes_mem_chart(gpu)
- self.update_processes_user_mem_chart(gpu)
-
- return data or None
-
- def update_processes_mem_chart(self, gpu):
- ps = gpu.processes()
- if not ps:
- return
- chart = self.charts['gpu{0}_{1}'.format(gpu.num, PROCESSES_MEM)]
- active_dim_ids = []
- for p in ps:
- dim_id = 'gpu{0}_process_mem_{1}'.format(gpu.num, p['pid'])
- active_dim_ids.append(dim_id)
- if dim_id not in chart:
- chart.add_dimension([dim_id, '{0} {1}'.format(p['pid'], p['process_name'])])
- for dim in chart:
- if dim.id not in active_dim_ids:
- chart.del_dimension(dim.id, hide=False)
-
- def update_processes_user_mem_chart(self, gpu):
- ps = gpu.processes()
- if not ps:
- return
- chart = self.charts['gpu{0}_{1}'.format(gpu.num, USER_MEM)]
- active_dim_ids = []
- for p in ps:
- if not p.get('username'):
- continue
- dim_id = 'gpu{0}_user_mem_{1}'.format(gpu.num, p['username'])
- active_dim_ids.append(dim_id)
- if dim_id not in chart:
- chart.add_dimension([dim_id, '{0}'.format(p['username'])])
-
- for dim in chart:
- if dim.id not in active_dim_ids:
- chart.del_dimension(dim.id, hide=False)
-
- def check(self):
- if not self.poller.has_smi():
- self.error("couldn't find '{0}' binary".format(NVIDIA_SMI))
- return False
-
- raw_data = self.poller.run_once()
- if not raw_data:
- self.error("failed to invoke '{0}' binary".format(NVIDIA_SMI))
- return False
-
- parsed = self.parse_xml(raw_data)
- if parsed is None:
- return False
-
- gpus = parsed.findall('gpu')
- if not gpus:
- return False
-
- self.create_charts(gpus)
-
- return True
-
- def parse_xml(self, data):
- try:
- return et.fromstring(data)
- except et.ParseError as error:
- self.error('xml parse failed: "{0}", error: {1}'.format(data, error))
-
- return None
-
- def create_charts(self, gpus):
- for idx, root in enumerate(gpus):
- order, charts = gpu_charts(GPU(idx, root))
- self.order.extend(order)
- self.definitions.update(charts)
-
-
-def is_gpu_data_value_valid(value):
- try:
- int(value)
- except (TypeError, ValueError):
- return False
- return True
diff --git a/src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf b/src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
deleted file mode 100644
index 3d2a30d41..000000000
--- a/src/collectors/python.d.plugin/nvidia_smi/nvidia_smi.conf
+++ /dev/null
@@ -1,68 +0,0 @@
-# netdata python.d.plugin configuration for nvidia_smi
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, nvidia_smi also supports the following:
-#
-# loop_mode: yes/no # default is yes. If set to yes, `nvidia-smi` is executed in a separate thread using the `-l` option.
-# poll_seconds: SECONDS # default is 1. Sets how often, in seconds, `nvidia-smi` is polled in loop mode.
-# exclude_zero_memory_users: yes/no # default is no. Whether to exclude users whose processes hold 0 MiB of GPU memory from the per-user metrics.
-#
-# ----------------------------------------------------------------------
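-#
-# An illustrative job definition (the values shown are the defaults):
-#
-# local:
-#   loop_mode: yes
-#   poll_seconds: 1
-#   exclude_zero_memory_users: no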
diff --git a/src/collectors/python.d.plugin/openldap/integrations/openldap.md b/src/collectors/python.d.plugin/openldap/integrations/openldap.md
index 97199f7dd..3f363343a 100644
--- a/src/collectors/python.d.plugin/openldap/integrations/openldap.md
+++ b/src/collectors/python.d.plugin/openldap/integrations/openldap.md
@@ -190,6 +190,7 @@ timeout: 1
### Debug Mode
+
To troubleshoot issues with the `openldap` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -212,4 +213,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin openldap debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `openldap` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep openldap
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep openldap /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep openldap
+```
+
diff --git a/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md b/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
index 5b98fbd20..4cf1b54a4 100644
--- a/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
+++ b/src/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
@@ -201,6 +201,7 @@ remote:
### Debug Mode
+
To troubleshoot issues with the `oracledb` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -223,4 +224,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin oracledb debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `oracledb` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep oracledb
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep oracledb /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep oracledb
+```
+
diff --git a/src/collectors/python.d.plugin/pandas/integrations/pandas.md b/src/collectors/python.d.plugin/pandas/integrations/pandas.md
index 898e23f0a..e0b5418c5 100644
--- a/src/collectors/python.d.plugin/pandas/integrations/pandas.md
+++ b/src/collectors/python.d.plugin/pandas/integrations/pandas.md
@@ -340,6 +340,7 @@ sql:
### Debug Mode
+
To troubleshoot issues with the `pandas` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -362,4 +363,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin pandas debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `pandas` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep pandas
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep pandas /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep pandas
+```
+
diff --git a/src/collectors/python.d.plugin/postfix/integrations/postfix.md b/src/collectors/python.d.plugin/postfix/integrations/postfix.md
deleted file mode 100644
index 32cc52fbb..000000000
--- a/src/collectors/python.d.plugin/postfix/integrations/postfix.md
+++ /dev/null
@@ -1,151 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/postfix/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/postfix/metadata.yaml"
-sidebar_label: "Postfix"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Mail Servers"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Postfix
-
-
-<img src="https://netdata.cloud/img/postfix.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: postfix
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-Keep an eye on Postfix metrics for efficient mail server operations.
-Improve your mail server performance with Netdata's real-time metrics and built-in alerts.
-
-
-Monitors MTA email queue statistics using [postqueue](http://www.postfix.org/postqueue.1.html) tool.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to view the mail queue. To do so, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.
-See the `authorized_mailq_users` setting in the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector executes `postqueue -p` to get Postfix queue statistics.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Postfix instance
-
-These metrics refer to the entire monitored application.
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| postfix.qemails | emails | emails |
-| postfix.qsize | size | KiB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-There is no configuration file.
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-There are no configuration examples.
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `postfix` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin postfix debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/postfix/metadata.yaml b/src/collectors/python.d.plugin/postfix/metadata.yaml
deleted file mode 100644
index 1bbb61164..000000000
--- a/src/collectors/python.d.plugin/postfix/metadata.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: postfix
- monitored_instance:
- name: Postfix
- link: https://www.postfix.org/
- categories:
- - data-collection.mail-servers
- icon_filename: "postfix.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - postfix
- - mail
- - mail server
- most_popular: false
- overview:
- data_collection:
- metrics_description: >
- Keep an eye on Postfix metrics for efficient mail server operations.
-
- Improve your mail server performance with Netdata's real-time metrics and built-in alerts.
- method_description: >
- Monitors MTA email queue statistics using [postqueue](http://www.postfix.org/postqueue.1.html) tool.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: >
- Postfix has internal access controls that limit activities on the mail queue. By default, all users are allowed to view
- the queue. If your system is configured with stricter access controls, you need to grant the `netdata` user access to
- view the mail queue. In order to do it, add `netdata` to `authorized_mailq_users` in the `/etc/postfix/main.cf` file.
-
- See the `authorized_mailq_users` setting in
- the [Postfix documentation](https://www.postfix.org/postconf.5.html) for more details.
- default_behavior:
- auto_detection:
- description: "The collector executes `postqueue -p` to get Postfix queue statistics."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: Config options
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 1
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: |
- These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: postfix.qemails
- description: Postfix Queue Emails
- unit: "emails"
- chart_type: line
- dimensions:
- - name: emails
- - name: postfix.qsize
- description: Postfix Queue Emails Size
- unit: "KiB"
- chart_type: area
- dimensions:
- - name: size
diff --git a/src/collectors/python.d.plugin/postfix/postfix.chart.py b/src/collectors/python.d.plugin/postfix/postfix.chart.py
deleted file mode 100644
index b650514ee..000000000
--- a/src/collectors/python.d.plugin/postfix/postfix.chart.py
+++ /dev/null
@@ -1,52 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: postfix netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.ExecutableService import ExecutableService
-
-POSTQUEUE_COMMAND = 'postqueue -p'
-
-ORDER = [
- 'qemails',
- 'qsize',
-]
-
-CHARTS = {
- 'qemails': {
- 'options': [None, 'Postfix Queue Emails', 'emails', 'queue', 'postfix.qemails', 'line'],
- 'lines': [
- ['emails', None, 'absolute']
- ]
- },
- 'qsize': {
- 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
- 'lines': [
- ['size', None, 'absolute']
- ]
- }
-}
-
-
-class Service(ExecutableService):
- def __init__(self, configuration=None, name=None):
- ExecutableService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.command = POSTQUEUE_COMMAND
-
- def _get_data(self):
- """
- Format data received from shell command
- :return: dict
- """
- try:
- raw = self._get_raw_data()[-1].split(' ')
- if raw[0] == 'Mail' and raw[1] == 'queue':
- return {'emails': 0,
- 'size': 0}
-
- return {'emails': raw[4],
- 'size': raw[1]}
- except (ValueError, AttributeError):
- return None
diff --git a/src/collectors/python.d.plugin/postfix/postfix.conf b/src/collectors/python.d.plugin/postfix/postfix.conf
deleted file mode 100644
index a4d2472ee..000000000
--- a/src/collectors/python.d.plugin/postfix/postfix.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-# netdata python.d.plugin configuration for postfix
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# postfix is slow, so once every 10 seconds
-update_every: 10
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, postfix also supports the following:
-#
-# command: 'postqueue -p' # the command to run
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-
-local:
- command: 'postqueue -p'
diff --git a/src/collectors/python.d.plugin/puppet/integrations/puppet.md b/src/collectors/python.d.plugin/puppet/integrations/puppet.md
deleted file mode 100644
index 438f9bdc9..000000000
--- a/src/collectors/python.d.plugin/puppet/integrations/puppet.md
+++ /dev/null
@@ -1,215 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/puppet/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/puppet/metadata.yaml"
-sidebar_label: "Puppet"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/CICD Platforms"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Puppet
-
-
-<img src="https://netdata.cloud/img/puppet.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: puppet
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage, and file descriptors.
-
-
-It uses Puppet's metrics API endpoint to gather the metrics.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Puppet instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| puppet.jvm_heap | committed, used | MiB |
-| puppet.jvm_nonheap | committed, used | MiB |
-| puppet.cpu | execution, GC | percentage |
-| puppet.fdopen | used | descriptors |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/puppet.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/puppet.conf
-```
-#### Options
-
-This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-> Notes:
-> - The exact Fully Qualified Domain Name of the node should be used.
-> - Usually Puppet Server/DB startup time is VERY long, so a reasonably generous retry count should be used.
-> - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| url | HTTP or HTTPS URL; the exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8140 | yes |
-| tls_verify | Control HTTPS server certificate verification. | False | no |
-| tls_ca_file | Optional CA (bundle) file to use | | no |
-| tls_cert_file | Optional client certificate file | | no |
-| tls_key_file | Optional client key file | | no |
-| update_every | Sets the default data collection frequency. | 30 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration
-
-```yaml
-puppetserver:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
-
-```
-##### TLS Certificate
-
-An example using a TLS certificate
-
-<details open><summary>Config</summary>
-
-```yaml
-puppetdb:
- url: 'https://fqdn.example.com:8081'
- tls_cert_file: /path/to/client.crt
- tls_key_file: /path/to/client.key
- autodetection_retry: 1
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-puppetserver1:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
-
-puppetserver2:
- url: 'https://fqdn.example2.com:8140'
- autodetection_retry: 1
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `puppet` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin puppet debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/puppet/metadata.yaml b/src/collectors/python.d.plugin/puppet/metadata.yaml
deleted file mode 100644
index 5f68dca7f..000000000
--- a/src/collectors/python.d.plugin/puppet/metadata.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: puppet
- monitored_instance:
- name: Puppet
- link: "https://www.puppet.com/"
- categories:
- - data-collection.ci-cd-systems
- icon_filename: "puppet.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - puppet
- - jvm heap
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
-        This collector monitors Puppet metrics about JVM Heap, Non-Heap, CPU usage and file descriptors.
- method_description: |
- It uses Puppet's metrics API endpoint to gather the metrics.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: By default, this collector will use `https://fqdn.example.com:8140` as the URL to look for metrics.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: "python.d/puppet.conf"
- options:
- description: |
-        This particular collector does not need further configuration to work if permissions are satisfied, but you can always customize its data collection behavior.
-
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
- > Notes:
-        > - The exact Fully Qualified Domain Name of the node should be used.
-        > - Usually Puppet Server/DB startup time is VERY long, so a reasonably generous retry count should be used.
- > - A secured PuppetDB config may require a client certificate. This does not apply to the default PuppetDB configuration though.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: url
-            description: HTTP or HTTPS URL; the exact Fully Qualified Domain Name of the node should be used.
-            default_value: https://fqdn.example.com:8140
- required: true
- - name: tls_verify
- description: Control HTTPS server certificate verification.
- default_value: "False"
- required: false
- - name: tls_ca_file
- description: Optional CA (bundle) file to use
- default_value: ""
- required: false
- - name: tls_cert_file
- description: Optional client certificate file
- default_value: ""
- required: false
- - name: tls_key_file
- description: Optional client key file
- default_value: ""
- required: false
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 30
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- description: A basic example configuration
- folding:
- enabled: false
- config: |
- puppetserver:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
- - name: TLS Certificate
- description: An example using a TLS certificate
- config: |
- puppetdb:
- url: 'https://fqdn.example.com:8081'
- tls_cert_file: /path/to/client.crt
- tls_key_file: /path/to/client.key
- autodetection_retry: 1
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- puppetserver1:
- url: 'https://fqdn.example.com:8140'
- autodetection_retry: 1
-
- puppetserver2:
- url: 'https://fqdn.example2.com:8140'
- autodetection_retry: 1
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: puppet.jvm_heap
- description: JVM Heap
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: committed
- - name: used
- - name: puppet.jvm_nonheap
- description: JVM Non-Heap
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: committed
- - name: used
- - name: puppet.cpu
- description: CPU usage
- unit: "percentage"
- chart_type: stacked
- dimensions:
- - name: execution
- - name: GC
- - name: puppet.fdopen
- description: File Descriptors
- unit: "descriptors"
- chart_type: line
- dimensions:
- - name: used
diff --git a/src/collectors/python.d.plugin/puppet/puppet.chart.py b/src/collectors/python.d.plugin/puppet/puppet.chart.py
deleted file mode 100644
index 0e5b781f5..000000000
--- a/src/collectors/python.d.plugin/puppet/puppet.chart.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: puppet netdata python.d module
-# Author: Andrey Galkin <andrey@futoin.org> (andvgal)
-# SPDX-License-Identifier: GPL-3.0-or-later
-#
-# This module should work both with OpenSource and PE versions
-# of PuppetServer and PuppetDB.
-#
-# NOTE: PuppetDB may be configured to require proper TLS
-# client certificate for security reasons. Use tls_key_file
-# and tls_cert_file options then.
-#
-
-import socket
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-update_every = 5
-
-MiB = 1 << 20
-CPU_SCALE = 1000
-
-ORDER = [
- 'jvm_heap',
- 'jvm_nonheap',
- 'cpu',
- 'fd_open',
-]
-
-CHARTS = {
- 'jvm_heap': {
- 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm_heap', 'area'],
- 'lines': [
- ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
- ['jvm_heap_used', 'used', 'absolute', 1, MiB],
- ],
- 'variables': [
- ['jvm_heap_max'],
- ['jvm_heap_init'],
- ],
- },
- 'jvm_nonheap': {
- 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm_nonheap', 'area'],
- 'lines': [
- ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
- ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
- ],
- 'variables': [
- ['jvm_nonheap_max'],
- ['jvm_nonheap_init'],
- ],
- },
- 'cpu': {
- 'options': [None, 'CPU usage', 'percentage', 'resources', 'puppet.cpu', 'stacked'],
- 'lines': [
- ['cpu_time', 'execution', 'absolute', 1, CPU_SCALE],
- ['gc_time', 'GC', 'absolute', 1, CPU_SCALE],
- ]
- },
- 'fd_open': {
- 'options': [None, 'File Descriptors', 'descriptors', 'resources', 'puppet.fdopen', 'line'],
- 'lines': [
- ['fd_used', 'used', 'absolute'],
- ],
- 'variables': [
- ['fd_max'],
- ],
- },
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = 'https://{0}:8140'.format(socket.getfqdn())
-
- def _get_data(self):
- # NOTE: there are several ways to retrieve data
- # 1. Only PE versions:
- # https://puppet.com/docs/pe/2018.1/api_status/status_api_metrics_endpoints.html
- # 2. Individual Metrics API (JMX):
- # https://puppet.com/docs/pe/2018.1/api_status/metrics_api.html
- # 3. Extended status at debug level:
- # https://puppet.com/docs/pe/2018.1/api_status/status_api_json_endpoints.html
- #
-        # For the sake of simplicity and efficiency, the status endpoint is used.
-
- raw_data = self._get_raw_data(self.url + '/status/v1/services?level=debug')
-
- if raw_data is None:
- return None
-
- raw_data = loads(raw_data)
- data = {}
-
- try:
- try:
- jvm_metrics = raw_data['status-service']['status']['experimental']['jvm-metrics']
- except KeyError:
- jvm_metrics = raw_data['status-service']['status']['jvm-metrics']
-
- heap_mem = jvm_metrics['heap-memory']
- non_heap_mem = jvm_metrics['non-heap-memory']
-
- for k in ['max', 'committed', 'used', 'init']:
- data['jvm_heap_' + k] = heap_mem[k]
- data['jvm_nonheap_' + k] = non_heap_mem[k]
-
- fd_open = jvm_metrics['file-descriptors']
- data['fd_max'] = fd_open['max']
- data['fd_used'] = fd_open['used']
-
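-            # cpu-usage is fractional; scaling by CPU_SCALE before int() keeps
-            # the precision, and the chart divisor (CPU_SCALE) scales it back.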
- data['cpu_time'] = int(jvm_metrics['cpu-usage'] * CPU_SCALE)
- data['gc_time'] = int(jvm_metrics['gc-cpu-usage'] * CPU_SCALE)
- except KeyError:
- pass
-
- return data or None
diff --git a/src/collectors/python.d.plugin/puppet/puppet.conf b/src/collectors/python.d.plugin/puppet/puppet.conf
deleted file mode 100644
index ff5c3d020..000000000
--- a/src/collectors/python.d.plugin/puppet/puppet.conf
+++ /dev/null
@@ -1,94 +0,0 @@
-# netdata python.d.plugin configuration for Puppet Server and Puppet DB
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# These configuration options come from the UrlService base:
-# url: # HTTP or HTTPS URL
-# tls_verify: False # Control HTTPS server certificate verification
-# tls_ca_file: # Optional CA (bundle) file to use
-# tls_cert_file: # Optional client certificate file
-# tls_key_file: # Optional client key file
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-# puppet:
-# url: 'https://<FQDN>:8140'
-#
-
-#
-# A production configuration should look like the example below.
-#
-# NOTE: usually Puppet Server/DB startup time is VERY long, so a
-# reasonably generous retry count should be used.
-#
-# NOTE: a secured PuppetDB config may require a client certificate.
-# This does not apply to the default PuppetDB configuration though.
-#
-# puppetdb:
-# url: 'https://fqdn.example.com:8081'
-# tls_cert_file: /path/to/client.crt
-# tls_key_file: /path/to/client.key
-# autodetection_retry: 1
-#
-# puppetserver:
-# url: 'https://fqdn.example.com:8140'
-# autodetection_retry: 1
-#
diff --git a/src/collectors/python.d.plugin/python.d.conf b/src/collectors/python.d.plugin/python.d.conf
index 470b4bbb7..4fcecc75d 100644
--- a/src/collectors/python.d.plugin/python.d.conf
+++ b/src/collectors/python.d.plugin/python.d.conf
@@ -25,43 +25,21 @@ gc_run: yes
## Garbage collection interval in seconds. Default is 300.
gc_interval: 300
-# alarms: yes
# am2320: yes
# anomalies: no
-# beanstalk: yes
-# bind_rndc: yes
# boinc: yes
# ceph: yes
-# changefinder: no
-# dovecot: yes
# this is just an example
-example: no
-# exim: yes
-# gearman: yes
go_expvar: no
# haproxy: yes
-# icecast: yes
-# ipfs: yes
-# memcached: yes
-# monit: yes
-# nvidia_smi: yes
-# nsd: yes
# openldap: yes
# oracledb: yes
# pandas: yes
-# postfix: yes
-# puppet: yes
-# rethinkdbs: yes
# retroshare: yes
-# riakkv: yes
# samba: yes
# smartd_log: yes
# spigotmc: yes
-# squid: yes
# traefik: yes
-# tomcat: yes
-# tor: yes
-# uwsgi: yes
# varnish: yes
# w1sensor: yes
# zscores: no
@@ -70,17 +48,35 @@ go_expvar: no
## Disabled for existing installations.
adaptec_raid: no # Removed (replaced with go.d/adaptec_raid).
apache: no # Removed (replaced with go.d/apache).
+beanstalk: no # Removed (replaced with go.d/beanstalk).
+dovecot: no # Removed (replaced with go.d/dovecot).
elasticsearch: no # Removed (replaced with go.d/elasticsearch).
+exim: no # Removed (replaced with go.d/exim).
fail2ban: no # Removed (replaced with go.d/fail2ban).
freeradius: no # Removed (replaced with go.d/freeradius).
+gearman: no # Removed (replaced with go.d/gearman).
hddtemp: no # Removed (replaced with go.d/hddtemp).
hpssa: no # Removed (replaced with go.d/hpssa).
+icecast: no # Removed (replaced with go.d/icecast).
+ipfs: no # Removed (replaced with go.d/ipfs).
litespeed: no # Removed (replaced with go.d/litespeed).
megacli: no # Removed (replaced with go.d/megacli).
+memcached: no # Removed (replaced with go.d/memcached).
mongodb: no # Removed (replaced with go.d/mongodb).
+monit: no # Removed (replaced with go.d/monit).
mysql: no # Removed (replaced with go.d/mysql).
nginx: no # Removed (replaced with go.d/nginx).
+nsd: no # Removed (replaced with go.d/nsd).
+nvidia_smi: no # Removed (replaced with go.d/nvidia_smi).
+postfix: no # Removed (replaced with go.d/postfix).
+puppet: no # Removed (replaced with go.d/puppet).
postgres: no # Removed (replaced with go.d/postgres).
proxysql: no # Removed (replaced with go.d/proxysql).
redis: no # Removed (replaced with go.d/redis).
+rethinkdbs: no # Removed (replaced with go.d/rethinkdb).
+riakkv: no # Removed (replaced with go.d/riak).
sensors: no # Removed (replaced with go.d/sensors).
+squid: no # Removed (replaced with go.d/squid).
+tomcat: no # Removed (replaced with go.d/tomcat).
+tor: no # Removed (replaced with go.d/tor).
+uwsgi: no # Removed (replaced with go.d/uwsgi).
diff --git a/src/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py b/src/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
deleted file mode 100644
index f873eac83..000000000
--- a/src/collectors/python.d.plugin/python_modules/third_party/lm_sensors.py
+++ /dev/null
@@ -1,327 +0,0 @@
-# SPDX-License-Identifier: LGPL-2.1
-"""
-@package sensors.py
-Python Bindings for libsensors3
-
-use the documentation of libsensors for the low level API.
-see example.py for high level API usage.
-
-@author: Pavel Rojtberg (http://www.rojtberg.net)
-@see: https://github.com/paroj/sensors.py
-@copyright: LGPLv2 (same as libsensors) <http://opensource.org/licenses/LGPL-2.1>
-"""
-
-from ctypes import *
-import ctypes.util
-
-_libc = cdll.LoadLibrary(ctypes.util.find_library("c"))
-# see https://github.com/paroj/sensors.py/issues/1
-_libc.free.argtypes = [c_void_p]
-
-_hdl = cdll.LoadLibrary(ctypes.util.find_library("sensors"))
-
-version = c_char_p.in_dll(_hdl, "libsensors_version").value.decode("ascii")
-
-
-class SensorsError(Exception):
- pass
-
-
-class ErrorWildcards(SensorsError):
- pass
-
-
-class ErrorNoEntry(SensorsError):
- pass
-
-
-class ErrorAccessRead(SensorsError, OSError):
- pass
-
-
-class ErrorKernel(SensorsError, OSError):
- pass
-
-
-class ErrorDivZero(SensorsError, ZeroDivisionError):
- pass
-
-
-class ErrorChipName(SensorsError):
- pass
-
-
-class ErrorBusName(SensorsError):
- pass
-
-
-class ErrorParse(SensorsError):
- pass
-
-
-class ErrorAccessWrite(SensorsError, OSError):
- pass
-
-
-class ErrorIO(SensorsError, IOError):
- pass
-
-
-class ErrorRecursion(SensorsError):
- pass
-
-
-_ERR_MAP = {
- 1: ErrorWildcards,
- 2: ErrorNoEntry,
- 3: ErrorAccessRead,
- 4: ErrorKernel,
- 5: ErrorDivZero,
- 6: ErrorChipName,
- 7: ErrorBusName,
- 8: ErrorParse,
- 9: ErrorAccessWrite,
- 10: ErrorIO,
- 11: ErrorRecursion
-}
-
-
-def raise_sensor_error(errno, message=''):
- raise _ERR_MAP[abs(errno)](message)
-
-
-class bus_id(Structure):
- _fields_ = [("type", c_short),
- ("nr", c_short)]
-
-
-class chip_name(Structure):
- _fields_ = [("prefix", c_char_p),
- ("bus", bus_id),
- ("addr", c_int),
- ("path", c_char_p)]
-
-
-class feature(Structure):
- _fields_ = [("name", c_char_p),
- ("number", c_int),
- ("type", c_int)]
-
- # sensors_feature_type
- IN = 0x00
- FAN = 0x01
- TEMP = 0x02
- POWER = 0x03
- ENERGY = 0x04
- CURR = 0x05
- HUMIDITY = 0x06
- MAX_MAIN = 0x7
- VID = 0x10
- INTRUSION = 0x11
- MAX_OTHER = 0x12
- BEEP_ENABLE = 0x18
-
-
-class subfeature(Structure):
- _fields_ = [("name", c_char_p),
- ("number", c_int),
- ("type", c_int),
- ("mapping", c_int),
- ("flags", c_uint)]
-
-
-_hdl.sensors_get_detected_chips.restype = POINTER(chip_name)
-_hdl.sensors_get_features.restype = POINTER(feature)
-_hdl.sensors_get_all_subfeatures.restype = POINTER(subfeature)
-_hdl.sensors_get_label.restype = c_void_p # return pointer instead of str so we can free it
-_hdl.sensors_get_adapter_name.restype = c_char_p # docs do not say whether to free this or not
-_hdl.sensors_strerror.restype = c_char_p
-
-### RAW API ###
-MODE_R = 1
-MODE_W = 2
-COMPUTE_MAPPING = 4
-
-
-def init(cfg_file=None):
- file = _libc.fopen(cfg_file.encode("utf-8"), "r") if cfg_file is not None else None
-
- result = _hdl.sensors_init(file)
- if result != 0:
- raise_sensor_error(result, "sensors_init failed")
-
- if file is not None:
- _libc.fclose(file)
-
-
-def cleanup():
- _hdl.sensors_cleanup()
-
-
-def parse_chip_name(orig_name):
- ret = chip_name()
- err = _hdl.sensors_parse_chip_name(orig_name.encode("utf-8"), byref(ret))
-
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
- return ret
-
-
-def strerror(errnum):
- return _hdl.sensors_strerror(errnum).decode("utf-8")
-
-
-def free_chip_name(chip):
- _hdl.sensors_free_chip_name(byref(chip))
-
-
-def get_detected_chips(match, nr):
- """
- @return: (chip, next nr to query)
- """
- _nr = c_int(nr)
-
- if match is not None:
- match = byref(match)
-
- chip = _hdl.sensors_get_detected_chips(match, byref(_nr))
- chip = chip.contents if bool(chip) else None
- return chip, _nr.value
-
-
-def chip_snprintf_name(chip, buffer_size=200):
- """
- @param buffer_size defaults to the size used in the sensors utility
- """
- ret = create_string_buffer(buffer_size)
- err = _hdl.sensors_snprintf_chip_name(ret, buffer_size, byref(chip))
-
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
- return ret.value.decode("utf-8")
-
-
-def do_chip_sets(chip):
- """
- @attention this function was not tested
- """
- err = _hdl.sensors_do_chip_sets(byref(chip))
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
-
-def get_adapter_name(bus):
- return _hdl.sensors_get_adapter_name(byref(bus)).decode("utf-8")
-
-
-def get_features(chip, nr):
- """
- @return: (feature, next nr to query)
- """
- _nr = c_int(nr)
- feature = _hdl.sensors_get_features(byref(chip), byref(_nr))
- feature = feature.contents if bool(feature) else None
- return feature, _nr.value
-
-
-def get_label(chip, feature):
- ptr = _hdl.sensors_get_label(byref(chip), byref(feature))
- val = cast(ptr, c_char_p).value.decode("utf-8")
- _libc.free(ptr)
- return val
-
-
-def get_all_subfeatures(chip, feature, nr):
- """
- @return: (subfeature, next nr to query)
- """
- _nr = c_int(nr)
- subfeature = _hdl.sensors_get_all_subfeatures(byref(chip), byref(feature), byref(_nr))
- subfeature = subfeature.contents if bool(subfeature) else None
- return subfeature, _nr.value
-
-
-def get_value(chip, subfeature_nr):
- val = c_double()
- err = _hdl.sensors_get_value(byref(chip), subfeature_nr, byref(val))
- if err < 0:
- raise_sensor_error(err, strerror(err))
- return val.value
-
-
-def set_value(chip, subfeature_nr, value):
- """
- @attention this function was not tested
- """
- val = c_double(value)
- err = _hdl.sensors_set_value(byref(chip), subfeature_nr, byref(val))
- if err < 0:
- raise_sensor_error(err, strerror(err))
-
-
-### Convenience API ###
-class ChipIterator:
- def __init__(self, match=None):
- self.match = parse_chip_name(match) if match is not None else None
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- chip, self.nr = get_detected_chips(self.match, self.nr)
-
- if chip is None:
- raise StopIteration
-
- return chip
-
- def __del__(self):
- if self.match is not None:
- free_chip_name(self.match)
-
-    def next(self):  # python2 compatibility
- return self.__next__()
-
-
-class FeatureIterator:
- def __init__(self, chip):
- self.chip = chip
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- feature, self.nr = get_features(self.chip, self.nr)
-
- if feature is None:
- raise StopIteration
-
- return feature
-
-    def next(self):  # python2 compatibility
- return self.__next__()
-
-
-class SubFeatureIterator:
- def __init__(self, chip, feature):
- self.chip = chip
- self.feature = feature
- self.nr = 0
-
- def __iter__(self):
- return self
-
- def __next__(self):
- subfeature, self.nr = get_all_subfeatures(self.chip, self.feature, self.nr)
-
- if subfeature is None:
- raise StopIteration
-
- return subfeature
-
-    def next(self):  # python2 compatibility
- return self.__next__()
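-
-
-# Minimal usage sketch of the convenience API above (assumption: libsensors is
-# installed and the default configuration loads).
-if __name__ == '__main__':
-    init()
-    try:
-        for chip in ChipIterator():
-            print(chip_snprintf_name(chip))
-            for feature in FeatureIterator(chip):
-                label = get_label(chip, feature)
-                for sub in SubFeatureIterator(chip, feature):
-                    value = get_value(chip, sub.number)
-                    print('  {0} ({1}) = {2}'.format(label, sub.name.decode('utf-8'), value))
-    finally:
-        cleanup()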
diff --git a/src/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md b/src/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
deleted file mode 100644
index f7da12dd6..000000000
--- a/src/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
+++ /dev/null
@@ -1,190 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/rethinkdbs/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml"
-sidebar_label: "RethinkDB"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Databases"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# RethinkDB
-
-
-<img src="https://netdata.cloud/img/rethinkdb.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: rethinkdbs
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors metrics about RethinkDB clusters and database servers.
-
-It uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.
-
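-As an illustration, a minimal sketch of what the module does under the hood
-(connection parameters are the documented defaults; the empty `admin` password
-is an assumption for the example):
-
-```python
-# Sketch only: connect and read RethinkDB's built-in stats table.
-import rethinkdb as rdb
-
-r = rdb.RethinkDB() if hasattr(rdb, 'RethinkDB') else rdb  # driver >= 2.4 shim
-conn = r.connect(host='127.0.0.1', port=28015, user='admin', password='')
-for row in r.db('rethinkdb').table('stats').run(conn):
-    print(row['id'])  # e.g. ['cluster'] or ['server', '<server uuid>']
-```
-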
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-When no configuration file is found, the collector tries to connect to 127.0.0.1:28015.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per RethinkDB instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| rethinkdb.cluster_connected_servers | connected, missing | servers |
-| rethinkdb.cluster_clients_active | active | clients |
-| rethinkdb.cluster_queries | queries | queries/s |
-| rethinkdb.cluster_documents | reads, writes | documents/s |
-
-### Per database server
-
-
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| rethinkdb.client_connections | connections | connections |
-| rethinkdb.clients_active | active | clients |
-| rethinkdb.queries | queries | queries/s |
-| rethinkdb.documents | reads, writes | documents/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Required python module
-
-The collector requires the `rethinkdb` python module to be installed.
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/rethinkdbs.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/rethinkdbs.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| host | Hostname or IP of the RethinkDB server. | localhost | no |
-| port | Port to connect to the RethinkDB server. | 28015 | no |
-| user | The username to use to connect to the RethinkDB server. | admin | no |
-| password | The password to use to connect to the RethinkDB server. | | no |
-| timeout | Sets the connection timeout to the RethinkDB server, in seconds. | 2 | no |
-
-</details>
-
-#### Examples
-
-##### Local RethinkDB server
-
-An example of a configuration for a local RethinkDB server
-
-```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 28015
- user: "user"
- password: "pass"
-
-```
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `rethinkdbs` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin rethinkdbs debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml b/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml
deleted file mode 100644
index bbc50eac6..000000000
--- a/src/collectors/python.d.plugin/rethinkdbs/metadata.yaml
+++ /dev/null
@@ -1,188 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: rethinkdbs
- monitored_instance:
- name: RethinkDB
- link: 'https://rethinkdb.com/'
- categories:
- - data-collection.database-servers
- icon_filename: 'rethinkdb.png'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - rethinkdb
- - database
- - db
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors metrics about RethinkDB clusters and database servers.'
- method_description: 'It uses the `rethinkdb` python module to connect to a RethinkDB server instance and gather statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'When no configuration file is found, the collector tries to connect to 127.0.0.1:28015.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Required python module'
- description: 'The collector requires the `rethinkdb` python module to be installed.'
- configuration:
- file:
- name: python.d/rethinkdbs.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: host
-            description: Hostname or IP of the RethinkDB server.
- default_value: 'localhost'
- required: false
- - name: port
- description: Port to connect to the RethinkDB server.
- default_value: '28015'
- required: false
- - name: user
- description: The username to use to connect to the RethinkDB server.
- default_value: 'admin'
- required: false
- - name: password
- description: The password to use to connect to the RethinkDB server.
- default_value: ''
- required: false
- - name: timeout
-            description: Sets the connection timeout to the RethinkDB server, in seconds.
- default_value: '2'
- required: false
- examples:
- folding:
- enabled: false
- title: "Config"
- list:
- - name: Local RethinkDB server
- description: An example of a configuration for a local RethinkDB server
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 28015
- user: "user"
- password: "pass"
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: rethinkdb.cluster_connected_servers
- description: Connected Servers
- unit: "servers"
- chart_type: stacked
- dimensions:
- - name: connected
- - name: missing
- - name: rethinkdb.cluster_clients_active
- description: Active Clients
- unit: "clients"
- chart_type: line
- dimensions:
- - name: active
- - name: rethinkdb.cluster_queries
- description: Queries
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: queries
- - name: rethinkdb.cluster_documents
- description: Documents
- unit: "documents/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
- - name: database server
- description: ""
- labels: []
- metrics:
- - name: rethinkdb.client_connections
- description: Client Connections
- unit: "connections"
- chart_type: line
- dimensions:
- - name: connections
- - name: rethinkdb.clients_active
- description: Active Clients
- unit: "clients"
- chart_type: line
- dimensions:
- - name: active
- - name: rethinkdb.queries
- description: Queries
- unit: "queries/s"
- chart_type: line
- dimensions:
- - name: queries
- - name: rethinkdb.documents
- description: Documents
- unit: "documents/s"
- chart_type: line
- dimensions:
- - name: reads
- - name: writes
diff --git a/src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py b/src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
deleted file mode 100644
index e3fbc3632..000000000
--- a/src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.chart.py
+++ /dev/null
@@ -1,247 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: rethinkdb netdata python.d module
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-try:
- import rethinkdb as rdb
-
- HAS_RETHINKDB = True
-except ImportError:
- HAS_RETHINKDB = False
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-ORDER = [
- 'cluster_connected_servers',
- 'cluster_clients_active',
- 'cluster_queries',
- 'cluster_documents',
-]
-
-
-def cluster_charts():
- return {
- 'cluster_connected_servers': {
- 'options': [None, 'Connected Servers', 'servers', 'cluster', 'rethinkdb.cluster_connected_servers',
- 'stacked'],
- 'lines': [
- ['cluster_servers_connected', 'connected'],
- ['cluster_servers_missing', 'missing'],
- ]
- },
- 'cluster_clients_active': {
- 'options': [None, 'Active Clients', 'clients', 'cluster', 'rethinkdb.cluster_clients_active',
- 'line'],
- 'lines': [
- ['cluster_clients_active', 'active'],
- ]
- },
- 'cluster_queries': {
- 'options': [None, 'Queries', 'queries/s', 'cluster', 'rethinkdb.cluster_queries', 'line'],
- 'lines': [
- ['cluster_queries_per_sec', 'queries'],
- ]
- },
- 'cluster_documents': {
- 'options': [None, 'Documents', 'documents/s', 'cluster', 'rethinkdb.cluster_documents', 'line'],
- 'lines': [
- ['cluster_read_docs_per_sec', 'reads'],
- ['cluster_written_docs_per_sec', 'writes'],
- ]
- },
- }
-
-
-def server_charts(n):
- o = [
- '{0}_client_connections'.format(n),
- '{0}_clients_active'.format(n),
- '{0}_queries'.format(n),
- '{0}_documents'.format(n),
- ]
- f = 'server {0}'.format(n)
-
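-    # Dimensions built from *_total counters use the 'incremental' algorithm,
-    # so netdata charts them as per-second rates.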
- c = {
- o[0]: {
- 'options': [None, 'Client Connections', 'connections', f, 'rethinkdb.client_connections', 'line'],
- 'lines': [
- ['{0}_client_connections'.format(n), 'connections'],
- ]
- },
- o[1]: {
- 'options': [None, 'Active Clients', 'clients', f, 'rethinkdb.clients_active', 'line'],
- 'lines': [
- ['{0}_clients_active'.format(n), 'active'],
- ]
- },
- o[2]: {
- 'options': [None, 'Queries', 'queries/s', f, 'rethinkdb.queries', 'line'],
- 'lines': [
- ['{0}_queries_total'.format(n), 'queries', 'incremental'],
- ]
- },
- o[3]: {
- 'options': [None, 'Documents', 'documents/s', f, 'rethinkdb.documents', 'line'],
- 'lines': [
- ['{0}_read_docs_total'.format(n), 'reads', 'incremental'],
- ['{0}_written_docs_total'.format(n), 'writes', 'incremental'],
- ]
- },
- }
-
- return o, c
-
-
-class Cluster:
- def __init__(self, raw):
- self.raw = raw
-
- def data(self):
- qe = self.raw['query_engine']
-
- return {
- 'cluster_clients_active': qe['clients_active'],
- 'cluster_queries_per_sec': qe['queries_per_sec'],
- 'cluster_read_docs_per_sec': qe['read_docs_per_sec'],
- 'cluster_written_docs_per_sec': qe['written_docs_per_sec'],
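-            # Seed the server counters; Service.get_data() increments them
-            # once per discovered server.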
- 'cluster_servers_connected': 0,
- 'cluster_servers_missing': 0,
- }
-
-
-class Server:
- def __init__(self, raw):
- self.name = raw['server']
- self.raw = raw
-
- def error(self):
- return self.raw.get('error')
-
- def data(self):
- qe = self.raw['query_engine']
-
- d = {
- 'client_connections': qe['client_connections'],
- 'clients_active': qe['clients_active'],
- 'queries_total': qe['queries_total'],
- 'read_docs_total': qe['read_docs_total'],
- 'written_docs_total': qe['written_docs_total'],
- }
-
- return dict(('{0}_{1}'.format(self.name, k), d[k]) for k in d)
-
-
-# https://pypi.org/project/rethinkdb/2.4.0/
-# rdb.RethinkDB() can be used as a drop-in replacement for rdb.
-# https://github.com/rethinkdb/rethinkdb-python#quickstart
-def get_rethinkdb():
- if hasattr(rdb, 'RethinkDB'):
- return rdb.RethinkDB()
- return rdb
-
-
-class Service(SimpleService):
- def __init__(self, configuration=None, name=None):
- SimpleService.__init__(self, configuration=configuration, name=name)
- self.order = list(ORDER)
- self.definitions = cluster_charts()
- self.host = self.configuration.get('host', '127.0.0.1')
- self.port = self.configuration.get('port', 28015)
- self.user = self.configuration.get('user', 'admin')
- self.password = self.configuration.get('password')
- self.timeout = self.configuration.get('timeout', 2)
- self.rdb = None
- self.conn = None
- self.alive = True
-
- def check(self):
- if not HAS_RETHINKDB:
- self.error('"rethinkdb" module is needed to use rethinkdbs.py')
- return False
-
- self.debug("rethinkdb driver version {0}".format(rdb.__version__))
- self.rdb = get_rethinkdb()
-
- if not self.connect():
- return None
-
- stats = self.get_stats()
-
- if not stats:
- return None
-
- for v in stats[1:]:
- if get_id(v) == 'server':
- o, c = server_charts(v['server'])
- self.order.extend(o)
- self.definitions.update(c)
-
- return True
-
- def get_data(self):
- if not self.is_alive():
- return None
-
- stats = self.get_stats()
-
- if not stats:
- return None
-
- data = dict()
-
- # cluster
- data.update(Cluster(stats[0]).data())
-
- # servers
- for v in stats[1:]:
- if get_id(v) != 'server':
- continue
-
- s = Server(v)
-
- if s.error():
- data['cluster_servers_missing'] += 1
- else:
- data['cluster_servers_connected'] += 1
- data.update(s.data())
-
- return data
-
- def get_stats(self):
- try:
- return list(self.rdb.db('rethinkdb').table('stats').run(self.conn).items)
- except rdb.errors.ReqlError:
- self.alive = False
- return None
-
- def connect(self):
- try:
- self.conn = self.rdb.connect(
- host=self.host,
- port=self.port,
- user=self.user,
- password=self.password,
- timeout=self.timeout,
- )
- self.alive = True
- return True
- except rdb.errors.ReqlError as error:
- self.error('Connection to {0}:{1} failed: {2}'.format(self.host, self.port, error))
- return False
-
- def reconnect(self):
- # The connection is already closed after rdb.errors.ReqlError,
- # so we do not need to call conn.close()
- if self.connect():
- return True
- return False
-
- def is_alive(self):
- if not self.alive:
- return self.reconnect()
- return True
-
-
-def get_id(v):
- return v['id'][0]
diff --git a/src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf b/src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
deleted file mode 100644
index d671acbb0..000000000
--- a/src/collectors/python.d.plugin/rethinkdbs/rethinkdbs.conf
+++ /dev/null
@@ -1,76 +0,0 @@
-# netdata python.d.plugin configuration for rethinkdb
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, rethinkdb also supports the following:
-#
-# host: IP or HOSTNAME # default is 'localhost'
-# port: PORT # default is 28015
-# user: USERNAME # default is 'admin'
-# password: PASSWORD # not set by default
-# timeout: TIMEOUT # default is 2
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- name: 'local'
- host: 'localhost'
diff --git a/src/collectors/python.d.plugin/retroshare/README.md b/src/collectors/python.d.plugin/retroshare/README.md
deleted file mode 120000
index 4e4c2cdb7..000000000
--- a/src/collectors/python.d.plugin/retroshare/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/retroshare.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/retroshare/integrations/retroshare.md b/src/collectors/python.d.plugin/retroshare/integrations/retroshare.md
deleted file mode 100644
index b045127ee..000000000
--- a/src/collectors/python.d.plugin/retroshare/integrations/retroshare.md
+++ /dev/null
@@ -1,191 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/retroshare/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/retroshare/metadata.yaml"
-sidebar_label: "RetroShare"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Media Services"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# RetroShare
-
-
-<img src="https://netdata.cloud/img/retroshare.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: retroshare
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics.
-
-It connects to the RetroShare web interface to gather metrics.
-
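-As an illustration, a minimal sketch of the request the module performs (the
-endpoint and response shape are taken from the collector source further down in
-this patch; the URL is the documented default):
-
-```python
-# Sketch only: fetch RetroShare stats from the web interface.
-import json
-from urllib.request import urlopen
-
-with urlopen('http://localhost:9090/api/v2/stats') as resp:
-    parsed = json.loads(resp.read().decode('utf-8'))
-
-if str(parsed['returncode']) == 'ok':
-    stats = parsed['data'][0]
-    print(stats['bandwidth_up_kb'], stats['bandwidth_down_kb'])
-```
-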
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-The collector will attempt to connect to and detect a RetroShare web interface at http://localhost:9090, even without any configuration.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per RetroShare instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| retroshare.bandwidth | Upload, Download | kilobits/s |
-| retroshare.peers | All friends, Connected friends | peers |
-| retroshare.dht | DHT nodes estimated, RS nodes estimated | peers |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ retroshare_dht_working ](https://github.com/netdata/netdata/blob/master/src/health/health.d/retroshare.conf) | retroshare.dht | number of DHT peers |
-
-
-## Setup
-
-### Prerequisites
-
-#### RetroShare web interface
-
-RetroShare needs to be configured to enable the RetroShare Web Interface and allow access from the Netdata host.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/retroshare.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/retroshare.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |
-
-</details>
-
-#### Examples
-
-##### Local RetroShare Web UI
-
-A basic configuration for a RetroShare server running on localhost.
-
-<details open><summary>Config</summary>
-
-```yaml
-localhost:
- name: 'local retroshare'
- url: 'http://localhost:9090'
-
-```
-</details>
-
-##### Remote RetroShare Web UI
-
-A basic configuration for a remote RetroShare server.
-
-<details open><summary>Config</summary>
-
-```yaml
-remote:
- name: 'remote retroshare'
- url: 'http://1.2.3.4:9090'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `retroshare` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin retroshare debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/retroshare/metadata.yaml b/src/collectors/python.d.plugin/retroshare/metadata.yaml
deleted file mode 100644
index e0270e1dd..000000000
--- a/src/collectors/python.d.plugin/retroshare/metadata.yaml
+++ /dev/null
@@ -1,144 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: retroshare
- monitored_instance:
- name: RetroShare
- link: "https://retroshare.cc/"
- categories:
- - data-collection.media-streaming-servers
- icon_filename: "retroshare.png"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - retroshare
- - p2p
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors RetroShare statistics such as application bandwidth, peers, and DHT metrics."
- method_description: "It connects to the RetroShare web interface to gather metrics."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "The collector will attempt to connect and detect a RetroShare web interface through http://localhost:9090, even without any configuration."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: "RetroShare web interface"
- description: |
-          RetroShare needs to be configured to enable the RetroShare Web Interface and allow access from the Netdata host.
- configuration:
- file:
- name: python.d/retroshare.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: url
- description: The URL to the RetroShare Web UI.
- default_value: "http://localhost:9090"
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local RetroShare Web UI
- description: A basic configuration for a RetroShare server running on localhost.
- config: |
- localhost:
- name: 'local retroshare'
- url: 'http://localhost:9090'
- - name: Remote RetroShare Web UI
- description: A basic configuration for a remote RetroShare server.
- config: |
- remote:
- name: 'remote retroshare'
- url: 'http://1.2.3.4:9090'
-
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: retroshare_dht_working
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/retroshare.conf
- metric: retroshare.dht
- info: number of DHT peers
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: retroshare.bandwidth
- description: RetroShare Bandwidth
- unit: "kilobits/s"
- chart_type: area
- dimensions:
- - name: Upload
- - name: Download
- - name: retroshare.peers
- description: RetroShare Peers
- unit: "peers"
- chart_type: line
- dimensions:
- - name: All friends
- - name: Connected friends
- - name: retroshare.dht
- description: RetroShare DHT
- unit: "peers"
- chart_type: line
- dimensions:
- - name: DHT nodes estimated
- - name: RS nodes estimated
diff --git a/src/collectors/python.d.plugin/retroshare/retroshare.chart.py b/src/collectors/python.d.plugin/retroshare/retroshare.chart.py
deleted file mode 100644
index 3f9593e94..000000000
--- a/src/collectors/python.d.plugin/retroshare/retroshare.chart.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: RetroShare netdata python.d module
-# Authors: sehraf
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-
-from bases.FrameworkServices.UrlService import UrlService
-
-ORDER = [
- 'bandwidth',
- 'peers',
- 'dht',
-]
-
-CHARTS = {
- 'bandwidth': {
- 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
- 'lines': [
- ['bandwidth_up_kb', 'Upload'],
- ['bandwidth_down_kb', 'Download']
- ]
- },
- 'peers': {
- 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
- 'lines': [
- ['peers_all', 'All friends'],
- ['peers_connected', 'Connected friends']
- ]
- },
- 'dht': {
- 'options': [None, 'RetroShare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
- 'lines': [
- ['dht_size_all', 'DHT nodes estimated'],
- ['dht_size_rs', 'RS nodes estimated']
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.baseurl = self.configuration.get('url', 'http://localhost:9090')
-
- def _get_stats(self):
- """
- Format data received from http request
- :return: dict
- """
- try:
- raw = self._get_raw_data()
- parsed = json.loads(raw)
- if str(parsed['returncode']) != 'ok':
- return None
- except (TypeError, ValueError):
- return None
-
- return parsed['data'][0]
-
- def _get_data(self):
- """
- Get data from API
- :return: dict
- """
- self.url = self.baseurl + '/api/v2/stats'
- data = self._get_stats()
- if data is None:
- return None
-
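- # Negate upload so the area chart draws upload below the zero line and download above it.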
- data['bandwidth_up_kb'] = data['bandwidth_up_kb'] * -1
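- # When the DHT is inactive, report no values for the DHT dimensions rather than misleading zeros.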
- if data['dht_active'] is False:
- data['dht_size_all'] = None
- data['dht_size_rs'] = None
-
- return data
diff --git a/src/collectors/python.d.plugin/retroshare/retroshare.conf b/src/collectors/python.d.plugin/retroshare/retroshare.conf
deleted file mode 100644
index 3d0af538d..000000000
--- a/src/collectors/python.d.plugin/retroshare/retroshare.conf
+++ /dev/null
@@ -1,72 +0,0 @@
-# netdata python.d.plugin configuration for RetroShare
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, RetroShare also supports the following:
-#
-# - url: 'url' # the URL to the WebUI
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name: 'local'
- url: 'http://localhost:9090'
diff --git a/src/collectors/python.d.plugin/riakkv/README.md b/src/collectors/python.d.plugin/riakkv/README.md
deleted file mode 120000
index f43ece09b..000000000
--- a/src/collectors/python.d.plugin/riakkv/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/riakkv.md \ No newline at end of file
diff --git a/src/collectors/python.d.plugin/riakkv/integrations/riakkv.md b/src/collectors/python.d.plugin/riakkv/integrations/riakkv.md
deleted file mode 100644
index a671b9c76..000000000
--- a/src/collectors/python.d.plugin/riakkv/integrations/riakkv.md
+++ /dev/null
@@ -1,220 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/riakkv/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/riakkv/metadata.yaml"
-sidebar_label: "RiakKV"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Databases"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# RiakKV
-
-
-<img src="https://netdata.cloud/img/riak.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: riakkv
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors RiakKV metrics about throughput, latency, resources and more.
-
-
-This collector reads the database stats from the `/stats` endpoint.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per RiakKV instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| riak.kv.throughput | gets, puts | operations/s |
-| riak.dt.vnode_updates | counters, sets, maps | operations/s |
-| riak.search | queries | queries/s |
-| riak.search.documents | indexed | documents/s |
-| riak.consistent.operations | gets, puts | operations/s |
-| riak.kv.latency.get | mean, median, 95, 99, 100 | ms |
-| riak.kv.latency.put | mean, median, 95, 99, 100 | ms |
-| riak.dt.latency.counter_merge | mean, median, 95, 99, 100 | ms |
-| riak.dt.latency.set_merge | mean, median, 95, 99, 100 | ms |
-| riak.dt.latency.map_merge | mean, median, 95, 99, 100 | ms |
-| riak.search.latency.query | median, min, 95, 99, 999, max | ms |
-| riak.search.latency.index | median, min, 95, 99, 999, max | ms |
-| riak.consistent.latency.get | mean, median, 95, 99, 100 | ms |
-| riak.consistent.latency.put | mean, median, 95, 99, 100 | ms |
-| riak.vm | processes | total |
-| riak.vm.memory.processes | allocated, used | MB |
-| riak.kv.siblings_encountered.get | mean, median, 95, 99, 100 | siblings |
-| riak.kv.objsize.get | mean, median, 95, 99, 100 | KB |
-| riak.search.vnodeq_size | mean, median, 95, 99, 100 | messages |
-| riak.search.index | errors | errors |
-| riak.core.protobuf_connections | active | connections |
-| riak.core.repairs | read | repairs |
-| riak.core.fsm_active | get, put, secondary index, list keys | fsms |
-| riak.core.fsm_rejected | get, put | fsms |
-| riak.search.index | bad_entry, extract_fail | writes |
-
-
-
-## Alerts
-
-
-The following alerts are available:
-
-| Alert name | On metric | Description |
-|:------------|:----------|:------------|
-| [ riakkv_1h_kv_get_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of a client GET request and the subsequent response to the client over the last hour |
-| [ riakkv_kv_get_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.get | average time between reception of a client GET request and the subsequent response to the client over the last 3 minutes, compared to the average over the last hour |
-| [ riakkv_1h_kv_put_mean_latency ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of a client PUT request and the subsequent response to the client over the last hour |
-| [ riakkv_kv_put_slow ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.kv.latency.put | average time between reception of a client PUT request and the subsequent response to the client over the last 3 minutes, compared to the average over the last hour |
-| [ riakkv_vm_high_process_count ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.vm | number of processes running in the Erlang VM |
-| [ riakkv_list_keys_active ](https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf) | riak.core.fsm_active | number of currently running list keys finite state machines |
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure RiakKV to enable /stats endpoint
-
-You can follow the RiakKV configuration reference documentation to learn how to enable this.
-
-Source: https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces
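-
-As a sketch (assuming Riak KV 2.x and its stock `riak.conf`; adjust the address to your deployment), the HTTP listener that serves `/stats` is enabled with a line such as:
-
-```text
-listener.http.internal = 127.0.0.1:8098
-```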
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/riakkv.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/riakkv.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| url | The URL of the server | no | yes |
-
-</details>
-
-#### Examples
-
-##### Basic (default)
-
-A basic example configuration per job
-
-```yaml
-local:
-  url: 'http://localhost:8098/stats'
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-local:
- url: 'http://localhost:8098/stats'
-
-remote:
- url: 'http://192.0.2.1:8098/stats'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `riakkv` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin riakkv debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/riakkv/riakkv.chart.py b/src/collectors/python.d.plugin/riakkv/riakkv.chart.py
deleted file mode 100644
index c390c8bc0..000000000
--- a/src/collectors/python.d.plugin/riakkv/riakkv.chart.py
+++ /dev/null
@@ -1,334 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: riak netdata python.d module
-#
-# See also:
-# https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html
-
-from json import loads
-
-from bases.FrameworkServices.UrlService import UrlService
-
-# Riak updates the metrics at the /stats endpoint every 1 second.
-# If we use `update_every = 1` here, that means we might get weird jitter in the graph,
-# so the default is set to 2 seconds to prevent it.
-update_every = 2
-
-# charts order (can be overridden if you want less charts, or different order)
-ORDER = [
- # Throughput metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#throughput-metrics
- # Collected in totals.
- "kv.node_operations", # K/V node operations.
- "dt.vnode_updates", # Data type vnode updates.
- "search.queries", # Search queries on the node.
- "search.documents", # Documents indexed by Search.
- "consistent.operations", # Consistent node operations.
-
- # Latency metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#latency-metrics
- # Collected for the past minute in milliseconds,
- # returned from riak in microseconds.
- "kv.latency.get", # K/V GET FSM traversal latency.
- "kv.latency.put", # K/V PUT FSM traversal latency.
- "dt.latency.counter", # Update Counter Data type latency.
- "dt.latency.set", # Update Set Data type latency.
- "dt.latency.map", # Update Map Data type latency.
- "search.latency.query", # Search query latency.
- "search.latency.index", # Time it takes for search to index a new document.
- "consistent.latency.get", # Strong consistent read latency.
- "consistent.latency.put", # Strong consistent write latency.
-
- # Erlang resource usage metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#erlang-resource-usage-metrics
- # Processes collected as a gauge,
- # memory collected as Megabytes, returned as bytes from Riak.
- "vm.processes", # Number of processes currently running in the Erlang VM.
- "vm.memory.processes", # Total amount of memory allocated & used for Erlang processes.
-
- # General Riak Load / Health metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-load-health-metrics
- # The following are collected by Riak over the past minute:
- "kv.siblings_encountered.get", # Siblings encountered during GET operations by this node.
- "kv.objsize.get", # Object size encountered by this node.
- "search.vnodeq_size", # Number of unprocessed messages in the vnode message queues (Search).
- # The following are calculated in total, or as gauges:
- "search.index_errors", # Errors of the search subsystem while indexing documents.
- "core.pbc", # Number of currently active protocol buffer connections.
- "core.repairs", # Total read repair operations coordinated by this node.
- "core.fsm_active", # Active finite state machines by kind.
- "core.fsm_rejected", # Rejected finite state machines by kind.
-
- # General Riak Search Load / Health metrics
- # https://docs.riak.com/riak/kv/latest/using/reference/statistics-monitoring/index.html#general-riak-search-load-health-metrics
- # Reported as counters.
- "search.errors", # Write and read errors of the Search subsystem.
-]
-
-CHARTS = {
- # Throughput metrics
- "kv.node_operations": {
- "options": [None, "Reads & writes coordinated by this node", "operations/s", "throughput", "riak.kv.throughput",
- "line"],
- "lines": [
- ["node_gets_total", "gets", "incremental"],
- ["node_puts_total", "puts", "incremental"]
- ]
- },
- "dt.vnode_updates": {
- "options": [None, "Update operations coordinated by local vnodes by data type", "operations/s", "throughput",
- "riak.dt.vnode_updates", "line"],
- "lines": [
- ["vnode_counter_update_total", "counters", "incremental"],
- ["vnode_set_update_total", "sets", "incremental"],
- ["vnode_map_update_total", "maps", "incremental"],
- ]
- },
- "search.queries": {
- "options": [None, "Search queries on the node", "queries/s", "throughput", "riak.search", "line"],
- "lines": [
- ["search_query_throughput_count", "queries", "incremental"]
- ]
- },
- "search.documents": {
- "options": [None, "Documents indexed by search", "documents/s", "throughput", "riak.search.documents", "line"],
- "lines": [
- ["search_index_throughput_count", "indexed", "incremental"]
- ]
- },
- "consistent.operations": {
- "options": [None, "Consistent node operations", "operations/s", "throughput", "riak.consistent.operations",
- "line"],
- "lines": [
- ["consistent_gets_total", "gets", "incremental"],
- ["consistent_puts_total", "puts", "incremental"],
- ]
- },
-
- # Latency metrics
- "kv.latency.get": {
- "options": [None, "Time between reception of a client GET request and subsequent response to client", "ms",
- "latency", "riak.kv.latency.get", "line"],
- "lines": [
- ["node_get_fsm_time_mean", "mean", "absolute", 1, 1000],
- ["node_get_fsm_time_median", "median", "absolute", 1, 1000],
- ["node_get_fsm_time_95", "95", "absolute", 1, 1000],
- ["node_get_fsm_time_99", "99", "absolute", 1, 1000],
- ["node_get_fsm_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "kv.latency.put": {
- "options": [None, "Time between reception of a client PUT request and subsequent response to client", "ms",
- "latency", "riak.kv.latency.put", "line"],
- "lines": [
- ["node_put_fsm_time_mean", "mean", "absolute", 1, 1000],
- ["node_put_fsm_time_median", "median", "absolute", 1, 1000],
- ["node_put_fsm_time_95", "95", "absolute", 1, 1000],
- ["node_put_fsm_time_99", "99", "absolute", 1, 1000],
- ["node_put_fsm_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "dt.latency.counter": {
- "options": [None, "Time it takes to perform an Update Counter operation", "ms", "latency",
- "riak.dt.latency.counter_merge", "line"],
- "lines": [
- ["object_counter_merge_time_mean", "mean", "absolute", 1, 1000],
- ["object_counter_merge_time_median", "median", "absolute", 1, 1000],
- ["object_counter_merge_time_95", "95", "absolute", 1, 1000],
- ["object_counter_merge_time_99", "99", "absolute", 1, 1000],
- ["object_counter_merge_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "dt.latency.set": {
- "options": [None, "Time it takes to perform an Update Set operation", "ms", "latency",
- "riak.dt.latency.set_merge", "line"],
- "lines": [
- ["object_set_merge_time_mean", "mean", "absolute", 1, 1000],
- ["object_set_merge_time_median", "median", "absolute", 1, 1000],
- ["object_set_merge_time_95", "95", "absolute", 1, 1000],
- ["object_set_merge_time_99", "99", "absolute", 1, 1000],
- ["object_set_merge_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "dt.latency.map": {
- "options": [None, "Time it takes to perform an Update Map operation", "ms", "latency",
- "riak.dt.latency.map_merge", "line"],
- "lines": [
- ["object_map_merge_time_mean", "mean", "absolute", 1, 1000],
- ["object_map_merge_time_median", "median", "absolute", 1, 1000],
- ["object_map_merge_time_95", "95", "absolute", 1, 1000],
- ["object_map_merge_time_99", "99", "absolute", 1, 1000],
- ["object_map_merge_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "search.latency.query": {
- "options": [None, "Search query latency", "ms", "latency", "riak.search.latency.query", "line"],
- "lines": [
- ["search_query_latency_median", "median", "absolute", 1, 1000],
- ["search_query_latency_min", "min", "absolute", 1, 1000],
- ["search_query_latency_95", "95", "absolute", 1, 1000],
- ["search_query_latency_99", "99", "absolute", 1, 1000],
- ["search_query_latency_999", "999", "absolute", 1, 1000],
- ["search_query_latency_max", "max", "absolute", 1, 1000],
- ]
- },
- "search.latency.index": {
- "options": [None, "Time it takes Search to index a new document", "ms", "latency", "riak.search.latency.index",
- "line"],
- "lines": [
- ["search_index_latency_median", "median", "absolute", 1, 1000],
- ["search_index_latency_min", "min", "absolute", 1, 1000],
- ["search_index_latency_95", "95", "absolute", 1, 1000],
- ["search_index_latency_99", "99", "absolute", 1, 1000],
- ["search_index_latency_999", "999", "absolute", 1, 1000],
- ["search_index_latency_max", "max", "absolute", 1, 1000],
- ]
- },
-
- # Riak Strong Consistency metrics
- "consistent.latency.get": {
- "options": [None, "Strongly consistent read latency", "ms", "latency", "riak.consistent.latency.get", "line"],
- "lines": [
- ["consistent_get_time_mean", "mean", "absolute", 1, 1000],
- ["consistent_get_time_median", "median", "absolute", 1, 1000],
- ["consistent_get_time_95", "95", "absolute", 1, 1000],
- ["consistent_get_time_99", "99", "absolute", 1, 1000],
- ["consistent_get_time_100", "100", "absolute", 1, 1000],
- ]
- },
- "consistent.latency.put": {
- "options": [None, "Strongly consistent write latency", "ms", "latency", "riak.consistent.latency.put", "line"],
- "lines": [
- ["consistent_put_time_mean", "mean", "absolute", 1, 1000],
- ["consistent_put_time_median", "median", "absolute", 1, 1000],
- ["consistent_put_time_95", "95", "absolute", 1, 1000],
- ["consistent_put_time_99", "99", "absolute", 1, 1000],
- ["consistent_put_time_100", "100", "absolute", 1, 1000],
- ]
- },
-
- # BEAM metrics
- "vm.processes": {
- "options": [None, "Total processes running in the Erlang VM", "total", "vm", "riak.vm", "line"],
- "lines": [
- ["sys_process_count", "processes", "absolute"],
- ]
- },
- "vm.memory.processes": {
- "options": [None, "Memory allocated & used by Erlang processes", "MB", "vm", "riak.vm.memory.processes",
- "line"],
- "lines": [
- ["memory_processes", "allocated", "absolute", 1, 1024 * 1024],
- ["memory_processes_used", "used", "absolute", 1, 1024 * 1024]
- ]
- },
-
- # General Riak Load/Health metrics
- "kv.siblings_encountered.get": {
- "options": [None, "Number of siblings encountered during GET operations by this node during the past minute",
- "siblings", "load", "riak.kv.siblings_encountered.get", "line"],
- "lines": [
- ["node_get_fsm_siblings_mean", "mean", "absolute"],
- ["node_get_fsm_siblings_median", "median", "absolute"],
- ["node_get_fsm_siblings_95", "95", "absolute"],
- ["node_get_fsm_siblings_99", "99", "absolute"],
- ["node_get_fsm_siblings_100", "100", "absolute"],
- ]
- },
- "kv.objsize.get": {
- "options": [None, "Object size encountered by this node during the past minute", "KB", "load",
- "riak.kv.objsize.get", "line"],
- "lines": [
- ["node_get_fsm_objsize_mean", "mean", "absolute", 1, 1024],
- ["node_get_fsm_objsize_median", "median", "absolute", 1, 1024],
- ["node_get_fsm_objsize_95", "95", "absolute", 1, 1024],
- ["node_get_fsm_objsize_99", "99", "absolute", 1, 1024],
- ["node_get_fsm_objsize_100", "100", "absolute", 1, 1024],
- ]
- },
- "search.vnodeq_size": {
- "options": [None,
- "Number of unprocessed messages in the vnode message queues of Search on this node in the past minute",
- "messages", "load", "riak.search.vnodeq_size", "line"],
- "lines": [
- ["riak_search_vnodeq_mean", "mean", "absolute"],
- ["riak_search_vnodeq_median", "median", "absolute"],
- ["riak_search_vnodeq_95", "95", "absolute"],
- ["riak_search_vnodeq_99", "99", "absolute"],
- ["riak_search_vnodeq_100", "100", "absolute"],
- ]
- },
- "search.index_errors": {
- "options": [None, "Number of document index errors encountered by Search", "errors", "load",
- "riak.search.index", "line"],
- "lines": [
- ["search_index_fail_count", "errors", "absolute"]
- ]
- },
- "core.pbc": {
- "options": [None, "Protocol buffer connections by status", "connections", "load",
- "riak.core.protobuf_connections", "line"],
- "lines": [
- ["pbc_active", "active", "absolute"],
- # ["pbc_connects", "established_pastmin", "absolute"]
- ]
- },
- "core.repairs": {
- "options": [None, "Number of repair operations this node has coordinated", "repairs", "load",
- "riak.core.repairs", "line"],
- "lines": [
- ["read_repairs", "read", "absolute"]
- ]
- },
- "core.fsm_active": {
- "options": [None, "Active finite state machines by kind", "fsms", "load", "riak.core.fsm_active", "line"],
- "lines": [
- ["node_get_fsm_active", "get", "absolute"],
- ["node_put_fsm_active", "put", "absolute"],
- ["index_fsm_active", "secondary index", "absolute"],
- ["list_fsm_active", "list keys", "absolute"]
- ]
- },
- "core.fsm_rejected": {
- # Writing "Sidejob's" here seems to cause some weird issues: it results in this chart being rendered in
- # its own context and additionally, moves the entire Riak graph all the way up to the top of the Netdata
- # dashboard for some reason.
- "options": [None, "Finite state machines being rejected by Sidejobs overload protection", "fsms", "load",
- "riak.core.fsm_rejected", "line"],
- "lines": [
- ["node_get_fsm_rejected", "get", "absolute"],
- ["node_put_fsm_rejected", "put", "absolute"]
- ]
- },
-
- # General Riak Search Load / Health metrics
- "search.errors": {
- "options": [None, "Number of writes to Search failed due to bad data format by reason", "writes", "load",
- "riak.search.index", "line"],
- "lines": [
- ["search_index_bad_entry_count", "bad_entry", "absolute"],
- ["search_index_extract_fail_count", "extract_fail", "absolute"],
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- raw = self._get_raw_data()
- if not raw:
- return None
-
- try:
- return loads(raw)
- except (TypeError, ValueError) as err:
- self.error(err)
- return None
diff --git a/src/collectors/python.d.plugin/riakkv/riakkv.conf b/src/collectors/python.d.plugin/riakkv/riakkv.conf
deleted file mode 100644
index be01c48ac..000000000
--- a/src/collectors/python.d.plugin/riakkv/riakkv.conf
+++ /dev/null
@@ -1,68 +0,0 @@
-# netdata python.d.plugin configuration for riak
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- url : 'http://localhost:8098/stats'
diff --git a/src/collectors/python.d.plugin/samba/integrations/samba.md b/src/collectors/python.d.plugin/samba/integrations/samba.md
index b4a551a8e..4d6f8fcc3 100644
--- a/src/collectors/python.d.plugin/samba/integrations/samba.md
+++ b/src/collectors/python.d.plugin/samba/integrations/samba.md
@@ -196,6 +196,7 @@ my_job_name:
### Debug Mode
+
To troubleshoot issues with the `samba` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -218,4 +219,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin samba debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `samba` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep samba
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep samba /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If Netdata runs in a Docker container named "netdata" (replace the name if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep samba
+```
+
diff --git a/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md b/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
index 8f7fdaf4d..2e5e60669 100644
--- a/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
+++ b/src/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
@@ -191,6 +191,7 @@ remote_server:
### Debug Mode
+
To troubleshoot issues with the `spigotmc` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -213,4 +214,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin spigotmc debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `spigotmc` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep spigotmc
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep spigotmc /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If Netdata runs in a Docker container named "netdata" (replace the name if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep spigotmc
+```
+
diff --git a/src/collectors/python.d.plugin/squid/integrations/squid.md b/src/collectors/python.d.plugin/squid/integrations/squid.md
deleted file mode 100644
index 10f927af7..000000000
--- a/src/collectors/python.d.plugin/squid/integrations/squid.md
+++ /dev/null
@@ -1,199 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/squid/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/squid/metadata.yaml"
-sidebar_label: "Squid"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Squid
-
-
-<img src="https://netdata.cloud/img/squid.png" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: squid
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.
-
-
-It collects metrics from the endpoint where Squid exposes its `counters` data.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, this collector will try to autodetect where Squid presents its `counters` data by trying various configurations.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Squid instance
-
-These metrics refer to each monitored Squid instance.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| squid.clients_net | in, out, hits | kilobits/s |
-| squid.clients_requests | requests, hits, errors | requests/s |
-| squid.servers_net | in, out | kilobits/s |
-| squid.servers_requests | requests, errors | requests/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure Squid's Cache Manager
-
-Take a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.
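-
-In short, the access rules shipped in this collector's own `squid.conf` notes allow the local Agent to query the Cache Manager like so:
-
-```text
-http_access allow localhost manager
-http_access deny manager
-```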
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/squid.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/squid.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
-| host | The host to connect to. | | yes |
-| port | The port to connect to. | | yes |
-| request | The URL to request from Squid. | | yes |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic configuration example.
-
-```yaml
-example_job_name:
- name: 'local'
- host: 'localhost'
- port: 3128
- request: 'cache_object://localhost:3128/counters'
-
-```
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-local_job:
- name: 'local'
- host: '127.0.0.1'
- port: 3128
- request: 'cache_object://127.0.0.1:3128/counters'
-
-remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 3128
- request: 'cache_object://192.0.2.1:3128/counters'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `squid` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin squid debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/squid/squid.chart.py b/src/collectors/python.d.plugin/squid/squid.chart.py
deleted file mode 100644
index bcae2d892..000000000
--- a/src/collectors/python.d.plugin/squid/squid.chart.py
+++ /dev/null
@@ -1,123 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: squid netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-from bases.FrameworkServices.SocketService import SocketService
-
-ORDER = [
- 'clients_net',
- 'clients_requests',
- 'servers_net',
- 'servers_requests',
-]
-
-CHARTS = {
- 'clients_net': {
- 'options': [None, 'Squid Client Bandwidth', 'kilobits/s', 'clients', 'squid.clients_net', 'area'],
- 'lines': [
- ['client_http_kbytes_in', 'in', 'incremental', 8, 1],
- ['client_http_kbytes_out', 'out', 'incremental', -8, 1],
- ['client_http_hit_kbytes_out', 'hits', 'incremental', -8, 1]
- ]
- },
- 'clients_requests': {
- 'options': [None, 'Squid Client Requests', 'requests/s', 'clients', 'squid.clients_requests', 'line'],
- 'lines': [
- ['client_http_requests', 'requests', 'incremental'],
- ['client_http_hits', 'hits', 'incremental'],
- ['client_http_errors', 'errors', 'incremental', -1, 1]
- ]
- },
- 'servers_net': {
- 'options': [None, 'Squid Server Bandwidth', 'kilobits/s', 'servers', 'squid.servers_net', 'area'],
- 'lines': [
- ['server_all_kbytes_in', 'in', 'incremental', 8, 1],
- ['server_all_kbytes_out', 'out', 'incremental', -8, 1]
- ]
- },
- 'servers_requests': {
- 'options': [None, 'Squid Server Requests', 'requests/s', 'servers', 'squid.servers_requests', 'line'],
- 'lines': [
- ['server_all_requests', 'requests', 'incremental'],
- ['server_all_errors', 'errors', 'incremental', -1, 1]
- ]
- }
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- SocketService.__init__(self, configuration=configuration, name=name)
- self._keep_alive = True
- self.request = ''
- self.host = 'localhost'
- self.port = 3128
- self.order = ORDER
- self.definitions = CHARTS
-
- def _get_data(self):
- """
- Get data via http request
- :return: dict
- """
- response = self._get_raw_data()
-
- data = dict()
- try:
- raw = ''
- for tmp in response.split('\r\n'):
- if tmp.startswith('sample_time'):
- raw = tmp
- break
-
- if raw.startswith('<'):
- self.error('invalid data received')
- return None
-
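-            # Counter rows look like 'client_http.requests = 12345'; dots become underscores to form the dimension ids.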
- for row in raw.split('\n'):
- if row.startswith(('client', 'server.all')):
- tmp = row.split('=')
- data[tmp[0].replace('.', '_').strip(' ')] = int(tmp[1])
-
- except (ValueError, AttributeError, TypeError):
- self.error('invalid data received')
- return None
-
- if not data:
- self.error('no data received')
- return None
- return data
-
- def _check_raw_data(self, data):
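-        # The headers of interest fit within the first KiB of the response.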
- header = data[:1024].lower()
-
- if 'connection: keep-alive' in header:
- self._keep_alive = True
- else:
- self._keep_alive = False
-
- if data[-7:] == '\r\n0\r\n\r\n' and 'transfer-encoding: chunked' in header: # HTTP/1.1 response
- self.debug('received full response from squid')
- return True
-
- self.debug('waiting more data from squid')
- return False
-
- def check(self):
- """
- Parse essential configuration, autodetect squid configuration (if needed), and check if data is available
- :return: boolean
- """
- self._parse_config()
- # format request
- req = self.request.decode()
- if not req.startswith('GET'):
- req = 'GET ' + req
- if not req.endswith(' HTTP/1.1\r\n\r\n'):
- req += ' HTTP/1.1\r\n\r\n'
- self.request = req.encode()
- if self._get_data() is not None:
- return True
- else:
- return False
diff --git a/src/collectors/python.d.plugin/squid/squid.conf b/src/collectors/python.d.plugin/squid/squid.conf
deleted file mode 100644
index b90a52c0c..000000000
--- a/src/collectors/python.d.plugin/squid/squid.conf
+++ /dev/null
@@ -1,167 +0,0 @@
-# netdata python.d.plugin configuration for squid
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# In addition to the above, squid also supports the following:
-#
-# host : 'IP or HOSTNAME' # the host to connect to
-# port : PORT # the port to connect to
-# request: 'URL' # the URL to request from squid
-#
-
-# ----------------------------------------------------------------------
-# SQUID CONFIGURATION
-#
-# See:
-# http://wiki.squid-cache.org/Features/CacheManager
-#
-# In short, add to your squid configuration these:
-#
-# http_access allow localhost manager
-# http_access deny manager
-#
-# To remotely monitor a squid:
-#
-# acl managerAdmin src 192.0.2.1
-# http_access allow localhost manager
-# http_access allow managerAdmin manager
-# http_access deny manager
-#
-
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-tcp3128old:
- name : 'local'
- host : 'localhost'
- port : 3128
- request : 'cache_object://localhost:3128/counters'
-
-tcp8080old:
- name : 'local'
- host : 'localhost'
- port : 8080
- request : 'cache_object://localhost:3128/counters'
-
-tcp3128new:
- name : 'local'
- host : 'localhost'
- port : 3128
- request : '/squid-internal-mgr/counters'
-
-tcp8080new:
- name : 'local'
- host : 'localhost'
- port : 8080
- request : '/squid-internal-mgr/counters'
-
-# IPv4
-
-tcp3128oldipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 3128
- request : 'cache_object://127.0.0.1:3128/counters'
-
-tcp8080oldipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 8080
- request : 'cache_object://127.0.0.1:3128/counters'
-
-tcp3128newipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 3128
- request : '/squid-internal-mgr/counters'
-
-tcp8080newipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 8080
- request : '/squid-internal-mgr/counters'
-
-# IPv6
-
-tcp3128oldipv6:
- name : 'local'
- host : '::1'
- port : 3128
- request : 'cache_object://[::1]:3128/counters'
-
-tcp8080oldipv6:
- name : 'local'
- host : '::1'
- port : 8080
- request : 'cache_object://[::1]:3128/counters'
-
-tcp3128newipv6:
- name : 'local'
- host : '::1'
- port : 3128
- request : '/squid-internal-mgr/counters'
-
-tcp8080newipv6:
- name : 'local'
- host : '::1'
- port : 8080
- request : '/squid-internal-mgr/counters'
-
diff --git a/src/collectors/python.d.plugin/tomcat/integrations/tomcat.md b/src/collectors/python.d.plugin/tomcat/integrations/tomcat.md
deleted file mode 100644
index 64938ad62..000000000
--- a/src/collectors/python.d.plugin/tomcat/integrations/tomcat.md
+++ /dev/null
@@ -1,203 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/tomcat/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/tomcat/metadata.yaml"
-sidebar_label: "Tomcat"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Tomcat
-
-
-<img src="https://netdata.cloud/img/tomcat.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: tomcat
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
-
-
-It parses the information provided by the HTTP endpoint `/manager/status` in XML format.
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-You need to provide a username and password to access the webserver's status page. Create a separate user with read-only rights for this particular endpoint.
-
-### Default Behavior
-
-#### Auto-Detection
-
-If the Netdata Agent and the Tomcat webserver run on the same host, the module attempts to connect to http://localhost:8080/manager/status?XML=true without any credentials when left unconfigured, so it will probably fail.
-
-#### Limits
-
-This module does not support SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldn't do so over a public network (the public internet), since credentials are transmitted unencrypted.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Tomcat instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| tomcat.accesses | accesses, errors | requests/s |
-| tomcat.bandwidth | sent, received | KiB/s |
-| tomcat.processing_time | processing time | seconds |
-| tomcat.threads | current, busy | current threads |
-| tomcat.jvm | free, eden, survivor, tenured, code cache, compressed, metaspace | MiB |
-| tomcat.jvm_eden | used, committed, max | MiB |
-| tomcat.jvm_survivor | used, committed, max | MiB |
-| tomcat.jvm_tenured | used, committed, max | MiB |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Create a read-only `netdata` user to monitor the `/status` endpoint.
-
-This is necessary for configuring the collector.
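-
-A minimal sketch of `conf/tomcat-users.xml` for such a user (the `manager-status` role is the standard Tomcat role for the status page; the username and password here are placeholders):
-
-```xml
-<role rolename="manager-status"/>
-<user username="netdata" password="CHANGE_ME" roles="manager-status"/>
-```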
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/tomcat.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/tomcat.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options per job</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |
-| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected | no | no |
-| pass | A valid password for the user in question. Required if the endpoint is password protected | no | no |
-| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g. ajp-bio-8009 | | no |
-
-</details>
-
-#### Examples
-
-##### Basic
-
-A basic example configuration
-
-```yaml
-localhost:
- name : 'local'
- url : 'http://localhost:8080/manager/status?XML=true'
-
-```
-##### Using an IPv4 endpoint
-
-A typical configuration using an IPv4 endpoint
-
-<details open><summary>Config</summary>
-
-```yaml
-local_ipv4:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
-
-```
-</details>
-
-##### Using an IPv6 endpoint
-
-A typical configuration using an IPv6 endpoint
-
-<details open><summary>Config</summary>
-
-```yaml
-local_ipv6:
- name : 'local'
- url : 'http://[::1]:8080/manager/status?XML=true'
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `tomcat` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin tomcat debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/tomcat/metadata.yaml b/src/collectors/python.d.plugin/tomcat/metadata.yaml
deleted file mode 100644
index e68526073..000000000
--- a/src/collectors/python.d.plugin/tomcat/metadata.yaml
+++ /dev/null
@@ -1,200 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: tomcat
- monitored_instance:
- name: Tomcat
- link: "https://tomcat.apache.org/"
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: "tomcat.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - apache
- - tomcat
- - webserver
- - websocket
- - jakarta
- - javaEE
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Tomcat metrics about bandwidth, processing time, threads and more.
- method_description: |
- It parses the information provided by the HTTP endpoint `/manager/status` in XML format.
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "You need to provide the username and the password, to access the webserver's status page. Create a seperate user with read only rights for this particular endpoint"
- default_behavior:
- auto_detection:
- description: "If the Netdata Agent and the Tomcat webserver are in the same host, without configuration, module attempts to connect to http://localhost:8080/manager/status?XML=true, without any credentials. So it will probably fail."
- limits:
- description: "This module is not supporting SSL communication. If you want a Netdata Agent to monitor a Tomcat deployment, you shouldnt try to monitor it via public network (public internet). Credentials are passed by Netdata in an unsecure port"
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Create a read-only `netdata` user to monitor the `/status` endpoint.
- description: This is necessary for configuring the collector.
- configuration:
- file:
- name: "python.d/tomcat.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values. Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options per job"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: url
- description: The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true.
- default_value: no
- required: true
- - name: user
- description: A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected
- default_value: no
- required: false
- - name: pass
- description: A valid password for the user in question. Required if the endpoint is password protected
- default_value: no
- required: false
- - name: connector_name
- description: The connector component that communicates with a web connector via the AJP protocol, e.g. ajp-bio-8009
- default_value: ""
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration
- config: |
- localhost:
- name : 'local'
- url : 'http://localhost:8080/manager/status?XML=true'
- - name: Using an IPv4 endpoint
- description: A typical configuration using an IPv4 endpoint
- config: |
- local_ipv4:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
- - name: Using an IPv6 endpoint
- description: A typical configuration using an IPv6 endpoint
- config: |
- local_ipv6:
- name : 'local'
- url : 'http://[::1]:8080/manager/status?XML=true'
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: tomcat.accesses
- description: Requests
- unit: "requests/s"
- chart_type: area
- dimensions:
- - name: accesses
- - name: errors
- - name: tomcat.bandwidth
- description: Bandwidth
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: sent
- - name: received
- - name: tomcat.processing_time
- description: processing time
- unit: "seconds"
- chart_type: area
- dimensions:
- - name: processing time
- - name: tomcat.threads
- description: Threads
- unit: "current threads"
- chart_type: area
- dimensions:
- - name: current
- - name: busy
- - name: tomcat.jvm
- description: JVM Memory Pool Usage
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: free
- - name: eden
- - name: survivor
- - name: tenured
- - name: code cache
- - name: compressed
- - name: metaspace
- - name: tomcat.jvm_eden
- description: Eden Memory Usage
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: used
- - name: committed
- - name: max
- - name: tomcat.jvm_survivor
- description: Survivor Memory Usage
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: used
- - name: committed
- - name: max
- - name: tomcat.jvm_tenured
- description: Tenured Memory Usage
- unit: "MiB"
- chart_type: area
- dimensions:
- - name: used
- - name: committed
- - name: max
diff --git a/src/collectors/python.d.plugin/tomcat/tomcat.chart.py b/src/collectors/python.d.plugin/tomcat/tomcat.chart.py
deleted file mode 100644
index 90315f8c7..000000000
--- a/src/collectors/python.d.plugin/tomcat/tomcat.chart.py
+++ /dev/null
@@ -1,199 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: tomcat netdata python.d module
-# Author: Pawel Krupa (paulfantom)
-# Author: Wei He (Wing924)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import re
-import xml.etree.ElementTree as ET
-
-from bases.FrameworkServices.UrlService import UrlService
-
-MiB = 1 << 20
-
-# Regex fix for Tomcat single quote XML attributes
-# affecting Tomcat < 8.5.24 & 9.0.2 running with Java > 9
-# cf. https://bz.apache.org/bugzilla/show_bug.cgi?id=61603
-single_quote_regex = re.compile(r"='([^']+)'([^']+)''")
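-# Illustrative repair (values are hypothetical) applied by xml_single_quote_fix_parse below:
-#   single_quote_regex.sub(r"='\g<1>\g<2>'", "free='123'456''") -> "free='123456'"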
-
-ORDER = [
- 'accesses',
- 'bandwidth',
- 'processing_time',
- 'threads',
- 'jvm',
- 'jvm_eden',
- 'jvm_survivor',
- 'jvm_tenured',
-]
-
-CHARTS = {
- 'accesses': {
- 'options': [None, 'Requests', 'requests/s', 'statistics', 'tomcat.accesses', 'area'],
- 'lines': [
- ['requestCount', 'accesses', 'incremental'],
- ['errorCount', 'errors', 'incremental'],
- ]
- },
- 'bandwidth': {
- 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
- 'lines': [
- ['bytesSent', 'sent', 'incremental', 1, 1024],
- ['bytesReceived', 'received', 'incremental', 1, 1024],
- ]
- },
- 'processing_time': {
- 'options': [None, 'processing time', 'seconds', 'statistics', 'tomcat.processing_time', 'area'],
- 'lines': [
- ['processingTime', 'processing time', 'incremental', 1, 1000]
- ]
- },
- 'threads': {
- 'options': [None, 'Threads', 'current threads', 'statistics', 'tomcat.threads', 'area'],
- 'lines': [
- ['currentThreadCount', 'current', 'absolute'],
- ['currentThreadsBusy', 'busy', 'absolute']
- ]
- },
- 'jvm': {
- 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
- 'lines': [
- ['free', 'free', 'absolute', 1, MiB],
- ['eden_used', 'eden', 'absolute', 1, MiB],
- ['survivor_used', 'survivor', 'absolute', 1, MiB],
- ['tenured_used', 'tenured', 'absolute', 1, MiB],
- ['code_cache_used', 'code cache', 'absolute', 1, MiB],
- ['compressed_used', 'compressed', 'absolute', 1, MiB],
- ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
- ]
- },
- 'jvm_eden': {
- 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
- 'lines': [
- ['eden_used', 'used', 'absolute', 1, MiB],
- ['eden_committed', 'committed', 'absolute', 1, MiB],
- ['eden_max', 'max', 'absolute', 1, MiB]
- ]
- },
- 'jvm_survivor': {
- 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
- 'lines': [
- ['survivor_used', 'used', 'absolute', 1, MiB],
- ['survivor_committed', 'committed', 'absolute', 1, MiB],
- ['survivor_max', 'max', 'absolute', 1, MiB],
- ]
- },
- 'jvm_tenured': {
- 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
- 'lines': [
- ['tenured_used', 'used', 'absolute', 1, MiB],
- ['tenured_committed', 'committed', 'absolute', 1, MiB],
- ['tenured_max', 'max', 'absolute', 1, MiB]
- ]
- }
-}
-
-
-class Service(UrlService):
- def __init__(self, configuration=None, name=None):
- UrlService.__init__(self, configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
- self.connector_name = self.configuration.get('connector_name', None)
- self.parse = self.xml_parse
-
- def xml_parse(self, data):
- try:
- return ET.fromstring(data)
- except ET.ParseError:
- self.debug('%s is not a valid XML page. Please add "?XML=true" to the Tomcat status page URL.' % self.url)
- return None
-
- def xml_single_quote_fix_parse(self, data):
- data = single_quote_regex.sub(r"='\g<1>\g<2>'", data)
- return self.xml_parse(data)
-
- def check(self):
- self._manager = self._build_manager()
-
- raw_data = self._get_raw_data()
- if not raw_data:
- return False
-
- if single_quote_regex.search(raw_data):
- self.warning('Tomcat status page is returning invalid single quote XML, please consider upgrading '
- 'your Tomcat installation. See https://bz.apache.org/bugzilla/show_bug.cgi?id=61603')
- self.parse = self.xml_single_quote_fix_parse
-
- return self.parse(raw_data) is not None
-
- def _get_data(self):
- """
- Format data received from http request
- :return: dict
- """
- data = None
- raw_data = self._get_raw_data()
- if raw_data:
- xml = self.parse(raw_data)
- if xml is None:
- return None
-
- data = {}
-
- jvm = xml.find('jvm')
-
- connector = None
- if self.connector_name:
- for conn in xml.findall('connector'):
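- # substring match: a partial name like 'ajp' selects the first matching connector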
- if self.connector_name in conn.get('name'):
- connector = conn
- break
- else:
- connector = xml.find('connector')
-
- memory = jvm.find('memory')
- data['free'] = memory.get('free')
- data['total'] = memory.get('total')
-
- for pool in jvm.findall('memorypool'):
- name = pool.get('name')
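- # JVM memory pool names vary by collector (e.g. 'PS Eden Space', 'G1 Eden Space'), hence the substring checks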
- if 'Eden Space' in name:
- data['eden_used'] = pool.get('usageUsed')
- data['eden_committed'] = pool.get('usageCommitted')
- data['eden_max'] = pool.get('usageMax')
- elif 'Survivor Space' in name:
- data['survivor_used'] = pool.get('usageUsed')
- data['survivor_committed'] = pool.get('usageCommitted')
- data['survivor_max'] = pool.get('usageMax')
- elif 'Tenured Gen' in name or 'Old Gen' in name:
- data['tenured_used'] = pool.get('usageUsed')
- data['tenured_committed'] = pool.get('usageCommitted')
- data['tenured_max'] = pool.get('usageMax')
- elif name == 'Code Cache':
- data['code_cache_used'] = pool.get('usageUsed')
- data['code_cache_committed'] = pool.get('usageCommitted')
- data['code_cache_max'] = pool.get('usageMax')
- elif name == 'Compressed':
- data['compressed_used'] = pool.get('usageUsed')
- data['compressed_committed'] = pool.get('usageCommitted')
- data['compressed_max'] = pool.get('usageMax')
- elif name == 'Metaspace':
- data['metaspace_used'] = pool.get('usageUsed')
- data['metaspace_committed'] = pool.get('usageCommitted')
- data['metaspace_max'] = pool.get('usageMax')
-
- if connector is not None:
- thread_info = connector.find('threadInfo')
- data['currentThreadsBusy'] = thread_info.get('currentThreadsBusy')
- data['currentThreadCount'] = thread_info.get('currentThreadCount')
-
- request_info = connector.find('requestInfo')
- data['processingTime'] = request_info.get('processingTime')
- data['requestCount'] = request_info.get('requestCount')
- data['errorCount'] = request_info.get('errorCount')
- data['bytesReceived'] = request_info.get('bytesReceived')
- data['bytesSent'] = request_info.get('bytesSent')
-
- return data or None
diff --git a/src/collectors/python.d.plugin/tomcat/tomcat.conf b/src/collectors/python.d.plugin/tomcat/tomcat.conf
deleted file mode 100644
index 009591bdf..000000000
--- a/src/collectors/python.d.plugin/tomcat/tomcat.conf
+++ /dev/null
@@ -1,89 +0,0 @@
-# netdata python.d.plugin configuration for tomcat
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, tomcat also supports the following:
-#
-# url: 'URL' # the URL to fetch tomcat's status stats
-#
-# if the URL is password protected, the following are supported:
-#
-# user: 'username'
-# pass: 'password'
-#
-# if you have multiple connectors, the following are supported:
-#
-# connector_name: 'ajp-bio-8009' # default is null, which uses the first connector in the status XML
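-#
-# an illustrative job pinned to one connector (the connector name is hypothetical):
-#
-# local_ajp:
-#   name : 'local'
-#   url : 'http://localhost:8080/manager/status?XML=true'
-#   connector_name : 'ajp-bio-8009'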
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-localhost:
- name : 'local'
- url : 'http://localhost:8080/manager/status?XML=true'
-
-localipv4:
- name : 'local'
- url : 'http://127.0.0.1:8080/manager/status?XML=true'
-
-localipv6:
- name : 'local'
- url : 'http://[::1]:8080/manager/status?XML=true'
diff --git a/src/collectors/python.d.plugin/tor/integrations/tor.md b/src/collectors/python.d.plugin/tor/integrations/tor.md
deleted file mode 100644
index 728245cfa..000000000
--- a/src/collectors/python.d.plugin/tor/integrations/tor.md
+++ /dev/null
@@ -1,197 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/tor/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/tor/metadata.yaml"
-sidebar_label: "Tor"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/VPNs"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# Tor
-
-
-<img src="https://netdata.cloud/img/tor.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: tor
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors Tor bandwidth traffic.
-
-It connects to the Tor control port to collect traffic statistics.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-If no configuration is provided the collector will try to connect to 127.0.0.1:9051 to detect a running tor instance.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Tor instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| tor.traffic | read, write | KiB/s |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Required python module
-
-The `stem` python library needs to be installed.
-
-
-#### Required Tor configuration
-
-Add to /etc/tor/torrc:
-
-ControlPort 9051
-
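-If the control port is password protected, your `torrc` also needs a `HashedControlPassword` entry. As a sketch (assuming the `tor` binary is available), generate the hash for the password you will set in `python.d/tor.conf`:
-
-```bash
-tor --hash-password 'yourpassword'
-```
-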
-For more options please read the manual.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/tor.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/tor.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
-| control_addr | Tor control IP address | 127.0.0.1 | no |
-| control_port | Tor control port. Can be either a TCP port or a path to a socket file. | 9051 | no |
-| password | Tor control password | | no |
-
-</details>
-
-#### Examples
-
-##### Local TCP
-
-A basic TCP configuration. `control_addr` is omitted and defaults to `127.0.0.1`.
-
-<details open><summary>Config</summary>
-
-```yaml
-local_tcp:
- name: 'local'
- control_port: 9051
- password: <password> # if required
-
-```
-</details>
-
-##### Local socket
-
-A basic local socket configuration
-
-<details open><summary>Config</summary>
-
-```yaml
-local_socket:
- name: 'local'
- control_port: '/var/run/tor/control'
- password: <password> # if required
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `tor` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin tor debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/tor/metadata.yaml b/src/collectors/python.d.plugin/tor/metadata.yaml
deleted file mode 100644
index 8647eca23..000000000
--- a/src/collectors/python.d.plugin/tor/metadata.yaml
+++ /dev/null
@@ -1,143 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: tor
- monitored_instance:
- name: Tor
- link: 'https://www.torproject.org/'
- categories:
- - data-collection.vpns
- icon_filename: 'tor.svg'
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords:
- - tor
- - traffic
- - vpn
- most_popular: false
- overview:
- data_collection:
- metrics_description: 'This collector monitors Tor bandwidth traffic.'
- method_description: 'It connects to the Tor control port to collect traffic statistics.'
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: 'If no configuration is provided the collector will try to connect to 127.0.0.1:9051 to detect a running tor instance.'
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list:
- - title: 'Required python module'
- description: |
- The `stem` python library needs to be installed.
- - title: 'Required Tor configuration'
- description: |
- Add to /etc/tor/torrc:
-
- ControlPort 9051
-
- For more options please read the manual.
- configuration:
- file:
- name: python.d/tor.conf
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ''
- required: false
- - name: control_addr
- description: Tor control IP address
- default_value: 127.0.0.1
- required: false
- - name: control_port
- description: Tor control port. Can be either a TCP port or a path to a socket file.
- default_value: 9051
- required: false
- - name: password
- description: Tor control password
- default_value: ''
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Local TCP
- description: A basic TCP configuration. `control_addr` is omitted and defaults to `127.0.0.1`.
- config: |
- local_tcp:
- name: 'local'
- control_port: 9051
- password: <password> # if required
- - name: Local socket
- description: A basic local socket configuration
- config: |
- local_socket:
- name: 'local'
- control_port: '/var/run/tor/control'
- password: <password> # if required
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: tor.traffic
- description: Tor Traffic
- unit: "KiB/s"
- chart_type: area
- dimensions:
- - name: read
- - name: write
diff --git a/src/collectors/python.d.plugin/tor/tor.chart.py b/src/collectors/python.d.plugin/tor/tor.chart.py
deleted file mode 100644
index f7bc2d79b..000000000
--- a/src/collectors/python.d.plugin/tor/tor.chart.py
+++ /dev/null
@@ -1,109 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: tor netdata python.d module
-# Author: Federico Ceratto <federico.ceratto@gmail.com>
-# Author: Ilya Mashchenko (ilyam8)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-
-from bases.FrameworkServices.SimpleService import SimpleService
-
-try:
- import stem
- import stem.connection
- import stem.control
-
- STEM_AVAILABLE = True
-except ImportError:
- STEM_AVAILABLE = False
-
-DEF_PORT = 'default'
-DEF_ADDR = '127.0.0.1'
-
-ORDER = [
- 'traffic',
-]
-
-CHARTS = {
- 'traffic': {
- 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
- 'lines': [
- ['read', 'read', 'incremental', 1, 1024],
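- # negative divisor plots write traffic below the zero axis, mirroring reads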
- ['write', 'write', 'incremental', 1, -1024],
- ]
- }
-}
-
-
-class Service(SimpleService):
- """Provide netdata service for Tor"""
-
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = CHARTS
- self.port = self.configuration.get('control_port', DEF_PORT)
- self.addr = self.configuration.get('control_addr', DEF_ADDR)
- self.password = self.configuration.get('password')
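- # a control_port given as a non-numeric string is treated as a path to a unix socket file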
- self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
- self.conn = None
- self.alive = False
-
- def check(self):
- if not STEM_AVAILABLE:
- self.error('the stem library is missing')
- return False
-
- return self.connect()
-
- def get_data(self):
- if not self.alive and not self.reconnect():
- return None
-
- data = dict()
-
- try:
- data['read'] = self.conn.get_info('traffic/read')
- data['write'] = self.conn.get_info('traffic/written')
- except stem.ControllerError as error:
- self.debug(error)
- self.alive = False
-
- return data or None
-
- def authenticate(self):
- try:
- self.conn.authenticate(password=self.password)
- except stem.connection.AuthenticationFailure as error:
- self.error('authentication error: {0}'.format(error))
- return False
- return True
-
- def connect_via_port(self):
- try:
- self.conn = stem.control.Controller.from_port(address=self.addr, port=self.port)
- except (stem.SocketError, ValueError) as error:
- self.error(error)
-
- def connect_via_socket(self):
- try:
- self.conn = stem.control.Controller.from_socket_file(path=self.port)
- except (stem.SocketError, ValueError) as error:
- self.error(error)
-
- def connect(self):
- if self.conn:
- self.conn.close()
- self.conn = None
-
- if self.use_socket:
- self.connect_via_socket()
- else:
- self.connect_via_port()
-
- if self.conn and self.authenticate():
- self.alive = True
-
- return self.alive
-
- def reconnect(self):
- return self.connect()
diff --git a/src/collectors/python.d.plugin/tor/tor.conf b/src/collectors/python.d.plugin/tor/tor.conf
deleted file mode 100644
index c7c98dc0b..000000000
--- a/src/collectors/python.d.plugin/tor/tor.conf
+++ /dev/null
@@ -1,81 +0,0 @@
-# netdata python.d.plugin configuration for tor
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, tor plugin also supports the following:
-#
-# control_addr: 'address' # tor control IP address (defaults to '127.0.0.1')
-# control_port: 'port' # tor control port
-# password: 'password' # tor control password
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-# local_tcp:
-# name: 'local'
-# control_port: 9051
-# control_addr: 127.0.0.1
-# password: <password>
-#
-# local_socket:
-# name: 'local'
-# control_port: '/var/run/tor/control'
-# password: <password>
diff --git a/src/collectors/python.d.plugin/traefik/metadata.yaml b/src/collectors/python.d.plugin/traefik/metadata.yaml
index 1d65a3dfe..5382ad54f 100644
--- a/src/collectors/python.d.plugin/traefik/metadata.yaml
+++ b/src/collectors/python.d.plugin/traefik/metadata.yaml
@@ -1,5 +1,5 @@
# This collector will not appear in documentation, as the go version is preferred,
-# /src/go/collectors/go.d.plugin/modules/traefik/README.md
+# /src/go/plugin/go.d/modules/traefik/README.md
#
# meta:
# plugin_name: python.d.plugin
diff --git a/src/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md b/src/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
deleted file mode 100644
index 508d9d195..000000000
--- a/src/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
+++ /dev/null
@@ -1,219 +0,0 @@
-<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/uwsgi/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/uwsgi/metadata.yaml"
-sidebar_label: "uWSGI"
-learn_status: "Published"
-learn_rel_path: "Collecting Metrics/Web Servers and Web Proxies"
-most_popular: False
-message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
-endmeta-->
-
-# uWSGI
-
-
-<img src="https://netdata.cloud/img/uwsgi.svg" width="150"/>
-
-
-Plugin: python.d.plugin
-Module: uwsgi
-
-<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
-
-## Overview
-
-This collector monitors uWSGI metrics about requests, workers, memory and more.
-
-It collects every metric exposed by the uWSGI stats server, either from the `stats.socket` or from the web server's TCP/IP socket.
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on the socket `/tmp/stats.socket`.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per uWSGI instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| uwsgi.requests | a dimension per worker | requests/s |
-| uwsgi.tx | a dimension per worker | KiB/s |
-| uwsgi.avg_rt | a dimension per worker | milliseconds |
-| uwsgi.memory_rss | a dimension per worker | MiB |
-| uwsgi.memory_vsz | a dimension per worker | MiB |
-| uwsgi.exceptions | exceptions | exceptions |
-| uwsgi.harakiris | harakiris | harakiris |
-| uwsgi.respawns | respawns | respawns |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Enable the uWSGI Stats server
-
-Make sure that your uWSGI instance exposes its metrics via a Stats server.
-
-Source: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
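-
-As a sketch, the stats server can be enabled when starting your application; every flag except `--stats` below is illustrative:
-
-```bash
-uwsgi --socket /tmp/app.sock --module myapp:app --stats /tmp/stats.socket
-```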
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `python.d/uwsgi.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/uwsgi.conf
-```
-#### Options
-
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
-
-
-<details open><summary>Config options</summary>
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
-| socket | The path to the uWSGI stats socket, e.g. `/tmp/stats.socket` | no | no |
-| host | The host to connect to | no | no |
-| port | The port to connect to | no | no |
-
-</details>
-
-#### Examples
-
-##### Basic (default out-of-the-box)
-
-A basic example configuration, used by the autodetection mechanism by default. As all JOBs share the same name, only one of them can run at a time.
-
-<details open><summary>Config</summary>
-
-```yaml
-socket:
- name : 'local'
- socket : '/tmp/stats.socket'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 1717
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 1717
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 1717
-
-```
-</details>
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-<details open><summary>Config</summary>
-
-```yaml
-local:
- name : 'local'
- host : 'localhost'
- port : 1717
-
-remote:
- name : 'remote'
- host : '192.0.2.1'
- port : 1717
-
-```
-</details>
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `uwsgi` collector, run the `python.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `python.d.plugin` to debug the collector:
-
- ```bash
- ./python.d.plugin uwsgi debug trace
- ```
-
-
diff --git a/src/collectors/python.d.plugin/uwsgi/metadata.yaml b/src/collectors/python.d.plugin/uwsgi/metadata.yaml
deleted file mode 100644
index cdb090ac1..000000000
--- a/src/collectors/python.d.plugin/uwsgi/metadata.yaml
+++ /dev/null
@@ -1,201 +0,0 @@
-plugin_name: python.d.plugin
-modules:
- - meta:
- plugin_name: python.d.plugin
- module_name: uwsgi
- monitored_instance:
- name: uWSGI
- link: "https://github.com/unbit/uwsgi/tree/2.0.21"
- categories:
- - data-collection.web-servers-and-web-proxies
- icon_filename: "uwsgi.svg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - application server
- - python
- - web applications
- most_popular: false
- overview:
- data_collection:
- metrics_description: "This collector monitors uWSGI metrics about requests, workers, memory and more."
- method_description: "It collects every metric exposed from the stats server of uWSGI, either from the `stats.socket` or from the web server's TCP/IP socket."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: "This collector will auto-detect uWSGI instances deployed on the local host, running on port 1717, or exposing stats on socket `tmp/stats.socket`."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Enable the uWSGI Stats server
- description: |
- Make sure that your uWSGI instance exposes its metrics via a Stats server.
-
- Source: https://uwsgi-docs.readthedocs.io/en/latest/StatsServer.html
- configuration:
- file:
- name: "python.d/uwsgi.conf"
- options:
- description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- - name: autodetection_retry
- description: Sets the job re-check interval in seconds.
- default_value: 0
- required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: The JOB's name as it will appear at the dashboard (by default is the job_name)
- default_value: job_name
- required: false
- - name: socket
- description: The path to the uWSGI stats socket, e.g. `/tmp/stats.socket`
- default_value: no
- required: false
- - name: host
- description: The host to connect to
- default_value: no
- required: false
- - name: port
- description: The port to connect to
- default_value: no
- required: false
- examples:
- folding:
- enabled: true
- title: "Config"
- list:
- - name: Basic (default out-of-the-box)
- description: A basic example configuration, used by the autodetection mechanism by default. As all JOBs share the same name, only one of them can run at a time.
- config: |
- socket:
- name : 'local'
- socket : '/tmp/stats.socket'
-
- localhost:
- name : 'local'
- host : 'localhost'
- port : 1717
-
- localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 1717
-
- localipv6:
- name : 'local'
- host : '::1'
- port : 1717
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- local:
- name : 'local'
- host : 'localhost'
- port : 1717
-
- remote:
- name : 'remote'
- host : '192.0.2.1'
- port : 1717
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics refer to the entire monitored application."
- labels: []
- metrics:
- - name: uwsgi.requests
- description: Requests
- unit: "requests/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.tx
- description: Transmitted data
- unit: "KiB/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.avg_rt
- description: Average request time
- unit: "milliseconds"
- chart_type: line
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.memory_rss
- description: RSS (Resident Set Size)
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.memory_vsz
- description: VSZ (Virtual Memory Size)
- unit: "MiB"
- chart_type: stacked
- dimensions:
- - name: a dimension per worker
- - name: uwsgi.exceptions
- description: Exceptions
- unit: "exceptions"
- chart_type: line
- dimensions:
- - name: exceptions
- - name: uwsgi.harakiris
- description: Harakiris
- unit: "harakiris"
- chart_type: line
- dimensions:
- - name: harakiris
- - name: uwsgi.respawns
- description: Respawns
- unit: "respawns"
- chart_type: line
- dimensions:
- - name: respawns
diff --git a/src/collectors/python.d.plugin/uwsgi/uwsgi.chart.py b/src/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
deleted file mode 100644
index e4d900005..000000000
--- a/src/collectors/python.d.plugin/uwsgi/uwsgi.chart.py
+++ /dev/null
@@ -1,177 +0,0 @@
-# -*- coding: utf-8 -*-
-# Description: uwsgi netdata python.d module
-# Author: Robbert Segeren (robbert-ef)
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-import json
-from copy import deepcopy
-
-from bases.FrameworkServices.SocketService import SocketService
-
-ORDER = [
- 'requests',
- 'tx',
- 'avg_rt',
- 'memory_rss',
- 'memory_vsz',
- 'exceptions',
- 'harakiri',
- 'respawn',
-]
-
-DYNAMIC_CHARTS = [
- 'requests',
- 'tx',
- 'avg_rt',
- 'memory_rss',
- 'memory_vsz',
-]
-
-# NOTE: lines are created dynamically in `check()` method
-CHARTS = {
- 'requests': {
- 'options': [None, 'Requests', 'requests/s', 'requests', 'uwsgi.requests', 'stacked'],
- 'lines': [
- ['requests', 'requests', 'incremental']
- ]
- },
- 'tx': {
- 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
- 'lines': [
- ['tx', 'tx', 'incremental']
- ]
- },
- 'avg_rt': {
- 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
- 'lines': [
- ['avg_rt', 'avg_rt', 'absolute']
- ]
- },
- 'memory_rss': {
- 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
- 'lines': [
- ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
- ]
- },
- 'memory_vsz': {
- 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
- 'lines': [
- ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
- ]
- },
- 'exceptions': {
- 'options': [None, 'Exceptions', 'exceptions', 'exceptions', 'uwsgi.exceptions', 'line'],
- 'lines': [
- ['exceptions', 'exceptions', 'incremental']
- ]
- },
- 'harakiri': {
- 'options': [None, 'Harakiris', 'harakiris', 'harakiris', 'uwsgi.harakiris', 'line'],
- 'lines': [
- ['harakiri_count', 'harakiris', 'incremental']
- ]
- },
- 'respawn': {
- 'options': [None, 'Respawns', 'respawns', 'respawns', 'uwsgi.respawns', 'line'],
- 'lines': [
- ['respawn_count', 'respawns', 'incremental']
- ]
- },
-}
-
-
-class Service(SocketService):
- def __init__(self, configuration=None, name=None):
- super(Service, self).__init__(configuration=configuration, name=name)
- self.order = ORDER
- self.definitions = deepcopy(CHARTS)
- self.url = self.configuration.get('host', 'localhost')
- self.port = self.configuration.get('port', 1717)
- # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
- for chart in DYNAMIC_CHARTS:
- self.definitions[chart]['lines'] = []
- self.last_result = {}
- self.workers = []
-
- def read_data(self):
- """
- Read data from socket and parse as JSON.
- :return: (dict) stats
- """
- raw_data = self._get_raw_data()
- if not raw_data:
- return None
- try:
- return json.loads(raw_data)
- except ValueError as err:
- self.error(err)
- return None
-
- def check(self):
- """
- Parse configuration and check if we can read data.
- :return: boolean
- """
- self._parse_config()
- return bool(self.read_data())
-
- def add_worker_dimensions(self, key):
- """
- Helper to add dimensions for a worker.
- :param key: (int or str) worker identifier
- :return:
- """
- for chart in DYNAMIC_CHARTS:
- for line in CHARTS[chart]['lines']:
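- # e.g. the base 'requests' line for worker pid 1234 becomes dimension id 'requests_1234'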
- dimension_id = '{}_{}'.format(line[0], key)
- dimension_name = str(key)
-
- dimension = [dimension_id, dimension_name] + line[2:]
- self.charts[chart].add_dimension(dimension)
-
- @staticmethod
- def _check_raw_data(data):
- # The server will close the connection when it's done sending
- # data, so just keep looping until that happens.
- return False
-
- def _get_data(self):
- """
- Read data from socket
- :return: dict
- """
- stats = self.read_data()
- if not stats:
- return None
-
- result = {
- 'exceptions': 0,
- 'harakiri_count': 0,
- 'respawn_count': 0,
- }
-
- for worker in stats['workers']:
- key = worker['pid']
-
- # Add dimensions for new workers
- if key not in self.workers:
- self.add_worker_dimensions(key)
- self.workers.append(key)
-
- result['requests_{}'.format(key)] = worker['requests']
- result['tx_{}'.format(key)] = worker['tx']
- result['avg_rt_{}'.format(key)] = worker['avg_rt']
-
- # avg_rt is not reset by uwsgi, so reset here
- if self.last_result.get('requests_{}'.format(key)) == worker['requests']:
- result['avg_rt_{}'.format(key)] = 0
-
- result['memory_rss_{}'.format(key)] = worker['rss']
- result['memory_vsz_{}'.format(key)] = worker['vsz']
-
- result['exceptions'] += worker['exceptions']
- result['harakiri_count'] += worker['harakiri_count']
- result['respawn_count'] += worker['respawn_count']
-
- self.last_result = result
- return result
diff --git a/src/collectors/python.d.plugin/uwsgi/uwsgi.conf b/src/collectors/python.d.plugin/uwsgi/uwsgi.conf
deleted file mode 100644
index 7d09e7330..000000000
--- a/src/collectors/python.d.plugin/uwsgi/uwsgi.conf
+++ /dev/null
@@ -1,92 +0,0 @@
-# netdata python.d.plugin configuration for uwsgi
-#
-# This file is in YaML format. Generally the format is:
-#
-# name: value
-#
-# There are 2 sections:
-# - global variables
-# - one or more JOBS
-#
-# JOBS allow you to collect values from multiple sources.
-# Each source will have its own set of charts.
-#
-# JOB parameters have to be indented (using spaces only, example below).
-
-# ----------------------------------------------------------------------
-# Global Variables
-# These variables set the defaults for all JOBs, however each JOB
-# may define its own, overriding the defaults.
-
-# update_every sets the default data collection frequency.
-# If unset, the python.d.plugin default is used.
-# update_every: 1
-
-# priority controls the order of charts at the netdata dashboard.
-# Lower numbers move the charts towards the top of the page.
-# If unset, the default for python.d.plugin is used.
-# priority: 60000
-
-# penalty indicates whether to apply penalty to update_every in case of failures.
-# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
-# penalty: yes
-
-# autodetection_retry sets the job re-check interval in seconds.
-# The job is not deleted if check fails.
-# Attempts to start the job are made once every autodetection_retry.
-# This feature is disabled by default.
-# autodetection_retry: 0
-
-# ----------------------------------------------------------------------
-# JOBS (data collection sources)
-#
-# The default JOBS share the same *name*. JOBS with the same name
-# are mutually exclusive. Only one of them will be allowed running at
-# any time. This allows autodetection to try several alternatives and
-# pick the one that works.
-#
-# Any number of jobs is supported.
-#
-# All python.d.plugin JOBS (for all its modules) support a set of
-# predefined parameters. These are:
-#
-# job_name:
-# name: myname # the JOB's name as it will appear at the
-# # dashboard (by default is the job_name)
-# # JOBs sharing a name are mutually exclusive
-# update_every: 1 # the JOB's data collection frequency
-# priority: 60000 # the JOB's order on the dashboard
-# penalty: yes # the JOB's penalty
-# autodetection_retry: 0 # the JOB's re-check interval in seconds
-#
-# Additionally to the above, uwsgi also supports the following:
-#
-# socket: 'path/to/uwsgistats.sock'
-#
-# or
-# host: 'IP or HOSTNAME' # the host to connect to
-# port: PORT # the port to connect to
-#
-# ----------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-#
-
-socket:
- name : 'local'
- socket : '/tmp/stats.socket'
-
-localhost:
- name : 'local'
- host : 'localhost'
- port : 1717
-
-localipv4:
- name : 'local'
- host : '127.0.0.1'
- port : 1717
-
-localipv6:
- name : 'local'
- host : '::1'
- port : 1717
diff --git a/src/collectors/python.d.plugin/varnish/integrations/varnish.md b/src/collectors/python.d.plugin/varnish/integrations/varnish.md
index 64da800a3..5850dcc4c 100644
--- a/src/collectors/python.d.plugin/varnish/integrations/varnish.md
+++ b/src/collectors/python.d.plugin/varnish/integrations/varnish.md
@@ -188,6 +188,7 @@ job_name:
### Debug Mode
+
To troubleshoot issues with the `varnish` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -210,4 +211,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin varnish debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `varnish` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep varnish
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep varnish /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep varnish
+```
+
diff --git a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
index 35517aeda..15582879e 100644
--- a/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
+++ b/src/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
@@ -142,6 +142,7 @@ sensors:
### Debug Mode
+
To troubleshoot issues with the `w1sensor` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -164,4 +165,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin w1sensor debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `w1sensor` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep w1sensor
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep w1sensor /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep w1sensor
+```
+
diff --git a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
index 1aceec67d..a5d2a7e47 100644
--- a/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
+++ b/src/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
@@ -170,6 +170,7 @@ local:
### Debug Mode
+
To troubleshoot issues with the `zscores` collector, run the `python.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
@@ -192,4 +193,37 @@ should give you clues as to why the collector isn't working.
./python.d.plugin zscores debug trace
```
+### Getting Logs
+
+If you're encountering problems with the `zscores` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep zscores
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep zscores /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep zscores
+```
+
diff --git a/src/collectors/python.d.plugin/beanstalk/README.md b/src/go/plugin/go.d/modules/beanstalk/README.md
index 4efe13889..4efe13889 120000
--- a/src/collectors/python.d.plugin/beanstalk/README.md
+++ b/src/go/plugin/go.d/modules/beanstalk/README.md
diff --git a/src/collectors/python.d.plugin/beanstalk/metadata.yaml b/src/go/plugin/go.d/modules/beanstalk/metadata.yaml
index 5e370f0a0..60aaf77e5 100644
--- a/src/collectors/python.d.plugin/beanstalk/metadata.yaml
+++ b/src/go/plugin/go.d/modules/beanstalk/metadata.yaml
@@ -1,14 +1,14 @@
-plugin_name: python.d.plugin
+plugin_name: go.d.plugin
modules:
- meta:
- plugin_name: python.d.plugin
+ id: collector-go.d.plugin-beanstalk
+ plugin_name: go.d.plugin
module_name: beanstalk
monitored_instance:
name: Beanstalk
- link: "https://beanstalkd.github.io/"
+ link: https://beanstalkd.github.io/
categories:
- data-collection.message-brokers
- #- data-collection.task-queues
icon_filename: "beanstalk.svg"
related_resources:
integrations:
@@ -22,8 +22,15 @@ modules:
most_popular: false
overview:
data_collection:
- metrics_description: "Monitor Beanstalk metrics to enhance job queueing and processing efficiency. Track job rates, processing times, and queue lengths for better task management."
- method_description: "The collector uses the `beanstalkc` python module to connect to a `beanstalkd` service and gather metrics."
+ metrics_description: |
+ This collector monitors Beanstalk server performance and provides detailed statistics for each tube.
+ method_description: |
+        Using the [beanstalkd protocol](https://github.com/beanstalkd/beanstalkd/blob/master/doc/protocol.txt), it communicates with the Beanstalk daemon to gather essential metrics that help you understand the server's performance and activity.
+ Executed commands:
+
+ - [stats](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L553).
+ - [list-tubes](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L688).
+ - [stats-tube](https://github.com/beanstalkd/beanstalkd/blob/91c54fc05dc759ef27459ce4383934e1a4f2fb4b/doc/protocol.txt#L497).
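+
+        As a quick manual check (a sketch, not part of the collector, assuming beanstalkd listens on the default 127.0.0.1:11300), you can issue the same commands with `nc`:
+
+        ```bash
+        # "stats" returns a YAML document; "quit" closes the connection so nc exits
+        printf 'stats\r\nquit\r\n' | nc 127.0.0.1 11300
+        ```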
supported_platforms:
include: []
exclude: []
@@ -32,92 +39,68 @@ modules:
description: ""
default_behavior:
auto_detection:
- description: "If no configuration is given, module will attempt to connect to beanstalkd on 127.0.0.1:11300 address."
+ description: |
+ By default, it detects Beanstalk instances running on localhost that are listening on port 11300.
limits:
description: ""
performance_impact:
description: ""
setup:
prerequisites:
- list:
- - title: "beanstalkc python module"
- description: The collector requires the `beanstalkc` python module to be installed.
+ list: []
configuration:
file:
- name: python.d/beanstalk.conf
+ name: go.d/beanstalk.conf
options:
description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ The following options can be defined globally: update_every, autodetection_retry.
folding:
- title: "Config options"
+ title: Config options
enabled: true
list:
- name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
+ description: Data collection frequency.
+ default_value: 1
required: false
- name: autodetection_retry
- description: Sets the job re-check interval in seconds.
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
default_value: 0
required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
+ - name: address
+ description: The IP address and port where the Beanstalk service listens for connections.
+ default_value: 127.0.0.1:11300
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
- required: false
- - name: host
- description: IP or URL to a beanstalk service.
- default_value: "127.0.0.1"
- required: false
- - name: port
- description: Port to the IP or URL to a beanstalk service.
- default_value: "11300"
+ - name: tube_selector
+ description: "Specifies a [pattern](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme) for which Beanstalk tubes Netdata will collect statistics."
+ default_value: "*"
required: false
examples:
folding:
enabled: true
- title: "Config"
+ title: Config
list:
- - name: Remote beanstalk server
- description: A basic remote beanstalk server
- folding:
- enabled: false
+ - name: Basic
+ description: A basic example configuration.
config: |
- remote:
- name: 'beanstalk'
- host: '1.2.3.4'
- port: 11300
+ jobs:
+ - name: local
+ address: 127.0.0.1:11300
- name: Multi-instance
description: |
> **Note**: When you define multiple jobs, their names must be unique.
-
+
Collecting metrics from local and remote instances.
config: |
- localhost:
- name: 'local_beanstalk'
- host: '127.0.0.1'
- port: 11300
-
- remote_job:
- name: 'remote_beanstalk'
- host: '192.0.2.1'
- port: 113000
+ jobs:
+ - name: local
+ address: 127.0.0.1:11300
+
+ - name: remote
+ address: 203.0.113.0:11300
troubleshooting:
problems:
list: []
@@ -137,26 +120,34 @@ modules:
description: "These metrics refer to the entire monitored application."
labels: []
metrics:
- - name: beanstalk.cpu_usage
- description: Cpu Usage
- unit: "cpu time"
- chart_type: area
+ - name: beanstalk.current_jobs
+ description: Current Jobs
+ unit: "jobs"
+ chart_type: stacked
dimensions:
- - name: user
- - name: system
+ - name: ready
+ - name: buried
+ - name: urgent
+ - name: delayed
+ - name: reserved
- name: beanstalk.jobs_rate
description: Jobs Rate
unit: "jobs/s"
chart_type: line
dimensions:
- - name: total
+ - name: created
+ - name: beanstalk.jobs_timeouts
+ description: Timed Out Jobs
+ unit: "jobs/s"
+ chart_type: line
+ dimensions:
- name: timeouts
- - name: beanstalk.connections_rate
- description: Connections Rate
- unit: "connections/s"
- chart_type: area
+ - name: beanstalk.current_tubes
+ description: Current Tubes
+ unit: "tubes"
+ chart_type: line
dimensions:
- - name: connections
+ - name: tubes
- name: beanstalk.commands_rate
description: Commands Rate
unit: "commands/s"
@@ -168,6 +159,8 @@ modules:
- name: peek-delayed
- name: peek-buried
- name: reserve
+ - name: reserve-with-timeout
+ - name: touch
- name: use
- name: watch
- name: ignore
@@ -181,38 +174,35 @@ modules:
- name: list-tube-used
- name: list-tubes-watched
- name: pause-tube
- - name: beanstalk.current_tubes
- description: Current Tubes
- unit: "tubes"
- chart_type: area
- dimensions:
- - name: tubes
- - name: beanstalk.current_jobs
- description: Current Jobs
- unit: "jobs"
- chart_type: stacked
- dimensions:
- - name: urgent
- - name: ready
- - name: reserved
- - name: delayed
- - name: buried
- name: beanstalk.current_connections
description: Current Connections
unit: "connections"
chart_type: line
dimensions:
- - name: written
+ - name: open
- name: producers
- name: workers
- name: waiting
- - name: beanstalk.binlog
- description: Binlog
+ - name: beanstalk.connections_rate
+ description: Connections Rate
+ unit: "connections/s"
+ chart_type: area
+ dimensions:
+ - name: created
+ - name: beanstalk.binlog_records
+ description: Binlog Records
unit: "records/s"
chart_type: line
dimensions:
- name: written
- name: migrated
+ - name: beanstalk.cpu_usage
+        description: CPU Usage
+ unit: "percent"
+ chart_type: stacked
+ dimensions:
+ - name: user
+ - name: system
- name: beanstalk.uptime
description: seconds
unit: "seconds"
@@ -220,44 +210,46 @@ modules:
dimensions:
- name: uptime
- name: tube
- description: "Metrics related to Beanstalk tubes. Each tube produces its own set of the following metrics."
- labels: []
+ description: "Metrics related to Beanstalk tubes. This set of metrics is provided for each tube."
+ labels:
+ - name: tube_name
+ description: Tube name.
metrics:
- - name: beanstalk.jobs_rate
- description: Jobs Rate
- unit: "jobs/s"
- chart_type: area
- dimensions:
- - name: jobs
- - name: beanstalk.jobs
- description: Jobs
+ - name: beanstalk.tube_current_jobs
+ description: Tube Current Jobs
unit: "jobs"
chart_type: stacked
dimensions:
- - name: urgent
- name: ready
- - name: reserved
- - name: delayed
- name: buried
- - name: beanstalk.connections
- description: Connections
+ - name: urgent
+ - name: delayed
+ - name: reserved
+ - name: beanstalk.tube_jobs_rate
+ description: Tube Jobs Rate
+ unit: "jobs/s"
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: beanstalk.tube_commands_rate
+ description: Tube Commands
+ unit: "commands/s"
+ chart_type: stacked
+ dimensions:
+ - name: delete
+ - name: pause-tube
+ - name: beanstalk.tube_current_connections
+ description: Tube Current Connections
unit: "connections"
chart_type: stacked
dimensions:
- name: using
- name: waiting
- name: watching
- - name: beanstalk.commands
- description: Commands
- unit: "commands/s"
- chart_type: stacked
- dimensions:
- - name: deletes
- - name: pauses
- - name: beanstalk.pause
- description: Pause
+ - name: beanstalk.tube_pause_time
+ description: Tube Pause Time
unit: "seconds"
- chart_type: stacked
+ chart_type: line
dimensions:
- name: since
- name: left
diff --git a/src/collectors/python.d.plugin/dovecot/README.md b/src/go/plugin/go.d/modules/dovecot/README.md
index c4749cedc..c4749cedc 120000
--- a/src/collectors/python.d.plugin/dovecot/README.md
+++ b/src/go/plugin/go.d/modules/dovecot/README.md
diff --git a/src/collectors/python.d.plugin/exim/README.md b/src/go/plugin/go.d/modules/exim/README.md
index f1f2ef9f9..f1f2ef9f9 120000
--- a/src/collectors/python.d.plugin/exim/README.md
+++ b/src/go/plugin/go.d/modules/exim/README.md
diff --git a/src/collectors/python.d.plugin/gearman/README.md b/src/go/plugin/go.d/modules/gearman/README.md
index 70189d698..70189d698 120000
--- a/src/collectors/python.d.plugin/gearman/README.md
+++ b/src/go/plugin/go.d/modules/gearman/README.md
diff --git a/src/collectors/python.d.plugin/icecast/README.md b/src/go/plugin/go.d/modules/icecast/README.md
index db3c1b572..db3c1b572 120000
--- a/src/collectors/python.d.plugin/icecast/README.md
+++ b/src/go/plugin/go.d/modules/icecast/README.md
diff --git a/src/collectors/python.d.plugin/ipfs/README.md b/src/go/plugin/go.d/modules/ipfs/README.md
index eee6a07b2..eee6a07b2 120000
--- a/src/collectors/python.d.plugin/ipfs/README.md
+++ b/src/go/plugin/go.d/modules/ipfs/README.md
diff --git a/src/collectors/python.d.plugin/memcached/README.md b/src/go/plugin/go.d/modules/memcached/README.md
index 2cb76d33c..2cb76d33c 120000
--- a/src/collectors/python.d.plugin/memcached/README.md
+++ b/src/go/plugin/go.d/modules/memcached/README.md
diff --git a/src/collectors/python.d.plugin/memcached/integrations/memcached.md b/src/go/plugin/go.d/modules/memcached/integrations/memcached.md
index 5e813eac2..1e653902f 100644
--- a/src/collectors/python.d.plugin/memcached/integrations/memcached.md
+++ b/src/go/plugin/go.d/modules/memcached/integrations/memcached.md
@@ -1,6 +1,6 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/memcached/README.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/python.d.plugin/memcached/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/memcached/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/plugin/go.d/modules/memcached/metadata.yaml"
sidebar_label: "Memcached"
learn_status: "Published"
learn_rel_path: "Collecting Metrics/Databases"
@@ -14,7 +14,7 @@ endmeta-->
<img src="https://netdata.cloud/img/memcached.svg" width="150"/>
-Plugin: python.d.plugin
+Plugin: go.d.plugin
Module: memcached
<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
@@ -23,7 +23,7 @@ Module: memcached
Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching.
-It reads server response to stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats)).
+It reads the server's response to the `stats` command.
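+
+As a quick manual check (a sketch, assuming memcached listens on the default 127.0.0.1:11211), you can read the same output yourself:
+
+```bash
+# memcached's text protocol: "stats" prints the statistics, "quit" closes the connection so nc exits
+printf 'stats\r\nquit\r\n' | nc 127.0.0.1 11211
+```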
This collector is supported on all platforms.
@@ -103,7 +103,7 @@ No action required.
#### File
-The configuration file name for this integration is `python.d/memcached.conf`.
+The configuration file name for this integration is `go.d/memcached.conf`.
You can edit the configuration file using the `edit-config` script from the
@@ -111,75 +111,56 @@ Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netda
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config python.d/memcached.conf
+sudo ./edit-config go.d/memcached.conf
```
#### Options
-There are 2 sections:
-
-* Global variables
-* One or more JOBS that can define multiple different instances to monitor.
-
-The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
-Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
-Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+The following options can be defined globally: update_every, autodetection_retry.
<details open><summary>Config options</summary>
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| host | the host to connect to. | 127.0.0.1 | no |
-| port | the port to connect to. | 11211 | no |
-| update_every | Sets the default data collection frequency. | 10 | no |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| address | The IP address and port where the memcached service listens for connections. | 127.0.0.1:11211 | yes |
+| timeout | Connection, read, and write timeout duration in seconds. The timeout includes name resolution. | 1 | no |
</details>
#### Examples
-##### localhost
-
-An example configuration for localhost.
-
-```yaml
-localhost:
- name: 'local'
- host: 'localhost'
- port: 11211
-
-```
-##### localipv4
+##### Basic
-An example configuration for localipv4.
+A basic example configuration.
<details open><summary>Config</summary>
```yaml
-localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 11211
+jobs:
+ - name: local
+ address: 127.0.0.1:11211
```
</details>
-##### localipv6
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
-An example configuration for localipv6.
<details open><summary>Config</summary>
```yaml
-localhost:
- name: 'local'
- host: '::1'
- port: 11211
+jobs:
+ - name: local
+ address: 127.0.0.1:11211
+
+ - name: remote
+ address: 203.0.113.0:11211
```
</details>
@@ -190,7 +171,9 @@ localhost:
### Debug Mode
-To troubleshoot issues with the `memcached` collector, run the `python.d.plugin` with the debug option enabled. The output
+**Important**: Debug mode is not supported for data collection jobs created via the UI using the Dyncfg feature.
+
+To troubleshoot issues with the `memcached` collector, run the `go.d.plugin` with the debug option enabled. The output
should give you clues as to why the collector isn't working.
- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
@@ -206,10 +189,43 @@ should give you clues as to why the collector isn't working.
sudo -u netdata -s
```
-- Run the `python.d.plugin` to debug the collector:
+- Run the `go.d.plugin` to debug the collector:
```bash
- ./python.d.plugin memcached debug trace
+ ./go.d.plugin -d -m memcached
```
+### Getting Logs
+
+If you're encountering problems with the `memcached` collector, follow these steps to retrieve logs and identify potential issues:
+
+- **Run the command** specific to your system (systemd, non-systemd, or Docker container).
+- **Examine the output** for any warnings or error messages that might indicate issues. These messages should provide clues about the root cause of the problem.
+
+#### System with systemd
+
+Use the following command to view logs generated since the last Netdata service restart:
+
+```bash
+journalctl _SYSTEMD_INVOCATION_ID="$(systemctl show --value --property=InvocationID netdata)" --namespace=netdata --grep memcached
+```
+
+#### System without systemd
+
+Locate the collector log file, typically at `/var/log/netdata/collector.log`, and use `grep` to filter for the collector's name:
+
+```bash
+grep memcached /var/log/netdata/collector.log
+```
+
+**Note**: This method shows logs from all restarts. Focus on the **latest entries** for troubleshooting current issues.
+
+#### Docker Container
+
+If your Netdata runs in a Docker container named "netdata" (replace if different), use this command:
+
+```bash
+docker logs netdata 2>&1 | grep memcached
+```
+
diff --git a/src/collectors/python.d.plugin/memcached/metadata.yaml b/src/go/plugin/go.d/modules/memcached/metadata.yaml
index ae420f1c1..c307ef018 100644
--- a/src/collectors/python.d.plugin/memcached/metadata.yaml
+++ b/src/go/plugin/go.d/modules/memcached/metadata.yaml
@@ -1,7 +1,8 @@
-plugin_name: python.d.plugin
+plugin_name: go.d.plugin
modules:
- meta:
- plugin_name: python.d.plugin
+ id: collector-go.d.plugin-memcached
+ plugin_name: go.d.plugin
module_name: memcached
monitored_instance:
name: Memcached
@@ -23,7 +24,7 @@ modules:
overview:
data_collection:
metrics_description: "Monitor Memcached metrics for proficient in-memory key-value store operations. Track cache hits, misses, and memory usage for efficient data caching."
- method_description: "It reads server response to stats command ([stats interface](https://github.com/memcached/memcached/wiki/Commands#stats))."
+ method_description: "It reads the server's response to the `stats` command."
supported_platforms:
include: []
exclude: []
@@ -43,84 +44,53 @@ modules:
list: []
configuration:
file:
- name: python.d/memcached.conf
- description: ""
+ name: go.d/memcached.conf
options:
description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ The following options can be defined globally: update_every, autodetection_retry.
folding:
title: Config options
enabled: true
list:
- - name: host
- description: the host to connect to.
- default_value: "127.0.0.1"
- required: false
- - name: port
- description: the port to connect to.
- default_value: "11211"
- required: false
- name: update_every
- description: Sets the default data collection frequency.
- default_value: 10
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
+ description: Data collection frequency.
+ default_value: 1
required: false
- name: autodetection_retry
- description: Sets the job re-check interval in seconds.
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
default_value: 0
required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- - name: name
- description: Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: ""
+ - name: address
+ description: The IP address and port where the memcached service listens for connections.
+ default_value: 127.0.0.1:11211
+ required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
required: false
examples:
folding:
+ title: Config
enabled: true
- title: "Config"
list:
- - name: localhost
- description: An example configuration for localhost.
- folding:
- enabled: false
- config: |
- localhost:
- name: 'local'
- host: 'localhost'
- port: 11211
- - name: localipv4
- description: An example configuration for localipv4.
- folding:
- enabled: true
+ - name: Basic
+ description: A basic example configuration.
config: |
- localhost:
- name: 'local'
- host: '127.0.0.1'
- port: 11211
- - name: localipv6
- description: An example configuration for localipv6.
- folding:
- enabled: true
+ jobs:
+ - name: local
+ address: 127.0.0.1:11211
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
config: |
- localhost:
- name: 'local'
- host: '::1'
- port: 11211
+ jobs:
+ - name: local
+ address: 127.0.0.1:11211
+
+ - name: remote
+ address: 203.0.113.0:11211
troubleshooting:
problems:
list: []
diff --git a/src/collectors/python.d.plugin/monit/README.md b/src/go/plugin/go.d/modules/monit/README.md
index ac69496f4..ac69496f4 120000
--- a/src/collectors/python.d.plugin/monit/README.md
+++ b/src/go/plugin/go.d/modules/monit/README.md
diff --git a/src/collectors/python.d.plugin/postfix/README.md b/src/go/plugin/go.d/modules/postfix/README.md
index c62eb5c24..c62eb5c24 120000
--- a/src/collectors/python.d.plugin/postfix/README.md
+++ b/src/go/plugin/go.d/modules/postfix/README.md
diff --git a/src/collectors/python.d.plugin/puppet/README.md b/src/go/plugin/go.d/modules/puppet/README.md
index b6c4c83f9..b6c4c83f9 120000
--- a/src/collectors/python.d.plugin/puppet/README.md
+++ b/src/go/plugin/go.d/modules/puppet/README.md
diff --git a/src/collectors/python.d.plugin/rethinkdbs/README.md b/src/go/plugin/go.d/modules/rethinkdb/README.md
index 78ddcfa18..78ddcfa18 120000
--- a/src/collectors/python.d.plugin/rethinkdbs/README.md
+++ b/src/go/plugin/go.d/modules/rethinkdb/README.md
diff --git a/src/collectors/python.d.plugin/riakkv/metadata.yaml b/src/go/plugin/go.d/modules/riakkv/metadata.yaml
index d68e73053..435cc4f9b 100644
--- a/src/collectors/python.d.plugin/riakkv/metadata.yaml
+++ b/src/go/plugin/go.d/modules/riakkv/metadata.yaml
@@ -1,17 +1,19 @@
-plugin_name: python.d.plugin
+plugin_name: go.d.plugin
modules:
- meta:
- plugin_name: python.d.plugin
+ id: collector-go.d.plugin-riakkv
+ plugin_name: go.d.plugin
module_name: riakkv
monitored_instance:
- name: RiakKV
- link: "https://riak.com/products/riak-kv/index.html"
+ name: Riak KV
+ link: https://riak.com/products/riak-kv/index.html
categories:
- data-collection.database-servers
icon_filename: "riak.svg"
related_resources:
integrations:
list: []
+ alternative_monitored_instances: []
info_provided_to_referring_integrations:
description: ""
keywords:
@@ -22,119 +24,154 @@ modules:
overview:
data_collection:
metrics_description: |
- This collector monitors RiakKV metrics about throughput, latency, resources and more.'
- method_description: "This collector reads the database stats from the `/stats` endpoint."
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
+ This collector monitors RiakKV metrics about throughput, latency, resources and more.
+ method_description: |
+ It sends HTTP requests to the Riak [/stats](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html) endpoint.
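+
+        As a quick manual check (a sketch, assuming the default address), you can query the same endpoint yourself:
+
+        ```bash
+        # the /stats endpoint returns a JSON document with all exposed metrics
+        curl http://127.0.0.1:8098/stats
+        ```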
default_behavior:
auto_detection:
- description: "If the /stats endpoint is accessible, RiakKV instances on the local host running on port 8098 will be autodetected."
+ description: |
+ By default, it detects Riak instances running on localhost that are listening on port 8098.
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1:8098/stats
limits:
description: ""
performance_impact:
description: ""
+ additional_permissions:
+ description: ""
+ multi_instance: true
+ supported_platforms:
+ include: []
+ exclude: []
setup:
prerequisites:
list:
- - title: Configure RiakKV to enable /stats endpoint
+ - title: Enable /stats endpoint
description: |
- You can follow the RiakKV configuration reference documentation for how to enable this.
-
- Source : https://docs.riak.com/riak/kv/2.2.3/configuring/reference/#client-interfaces
+          See the RiakKV [HTTP Status API documentation](https://docs.riak.com/riak/kv/2.2.3/developing/api/http/status.1.html).
configuration:
file:
- name: "python.d/riakkv.conf"
+ name: go.d/riakkv.conf
options:
description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ The following options can be defined globally: update_every, autodetection_retry.
folding:
- title: "Config options"
+ title: Config options
enabled: true
list:
- name: update_every
- description: Sets the default data collection frequency.
- default_value: 5
- required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
+ description: Data collection frequency.
+ default_value: 1
required: false
- name: autodetection_retry
- description: Sets the job re-check interval in seconds.
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
default_value: 0
required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
- required: false
- name: url
- description: The url of the server
- default_value: no
+ description: Server URL.
+ default_value: http://127.0.0.1:8098/stats
required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: GET
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
examples:
folding:
+ title: Config
enabled: true
- title: "Config"
list:
- - name: Basic (default)
+ - name: Basic
+ description: A basic example configuration.
folding:
enabled: false
- description: A basic example configuration per job
config: |
- local:
- url: 'http://localhost:8098/stats'
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: With enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+          url: https://127.0.0.1:8098/stats
+ tls_skip_verify: yes
- name: Multi-instance
description: |
> **Note**: When you define multiple jobs, their names must be unique.
Collecting metrics from local and remote instances.
config: |
- local:
- url: 'http://localhost:8098/stats'
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8098/stats
- remote:
- url: 'http://192.0.2.1:8098/stats'
+ - name: remote
+ url: http://192.0.2.1:8098/stats
troubleshooting:
problems:
list: []
- alerts:
- - name: riakkv_1h_kv_get_mean_latency
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf
- metric: riak.kv.latency.get
- info: average time between reception of client GET request and subsequent response to client over the last hour
- - name: riakkv_kv_get_slow
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf
- metric: riak.kv.latency.get
- info: average time between reception of client GET request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour
- - name: riakkv_1h_kv_put_mean_latency
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf
- metric: riak.kv.latency.put
- info: average time between reception of client PUT request and subsequent response to the client over the last hour
- - name: riakkv_kv_put_slow
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf
- metric: riak.kv.latency.put
- info: average time between reception of client PUT request and subsequent response to the client over the last 3 minutes, compared to the average over the last hour
- - name: riakkv_vm_high_process_count
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf
- metric: riak.vm
- info: number of processes running in the Erlang VM
- - name: riakkv_list_keys_active
- link: https://github.com/netdata/netdata/blob/master/src/health/health.d/riakkv.conf
- metric: riak.core.fsm_active
- info: number of currently running list keys finite state machines
+ alerts: []
metrics:
folding:
title: Metrics
@@ -143,7 +180,7 @@ modules:
availability: []
scopes:
- name: global
- description: "These metrics refer to the entire monitored application."
+ description: These metrics refer to the entire monitored application.
labels: []
metrics:
- name: riak.kv.throughput
@@ -316,11 +353,13 @@ modules:
- name: "99"
- name: "100"
- name: riak.search.index
- description: Number of document index errors encountered by Search
+ description: Errors encountered by Search
unit: "errors"
chart_type: line
dimensions:
- - name: errors
+ - name: index_fail
+ - name: bad_entry
+ - name: extract_fail
- name: riak.core.protobuf_connections
description: Protocol buffer connections by status
unit: "connections"
@@ -349,10 +388,3 @@ modules:
dimensions:
- name: get
- name: put
- - name: riak.search.index
- description: Number of writes to Search failed due to bad data format by reason
- unit: "writes"
- chart_type: line
- dimensions:
- - name: bad_entry
- - name: extract_fail
diff --git a/src/collectors/python.d.plugin/squid/README.md b/src/go/plugin/go.d/modules/squid/README.md
index c4e5a03d7..c4e5a03d7 120000
--- a/src/collectors/python.d.plugin/squid/README.md
+++ b/src/go/plugin/go.d/modules/squid/README.md
diff --git a/src/collectors/python.d.plugin/squid/metadata.yaml b/src/go/plugin/go.d/modules/squid/metadata.yaml
index d0c5b3ecc..fbe0202ee 100644
--- a/src/collectors/python.d.plugin/squid/metadata.yaml
+++ b/src/go/plugin/go.d/modules/squid/metadata.yaml
@@ -1,11 +1,12 @@
-plugin_name: python.d.plugin
+plugin_name: go.d.plugin
modules:
- meta:
- plugin_name: python.d.plugin
+ id: collector-go.d.plugin-squid
+ plugin_name: go.d.plugin
module_name: squid
monitored_instance:
name: Squid
- link: "http://www.squid-cache.org/"
+ link: "https://www.squid-cache.org/"
categories:
- data-collection.web-servers-and-web-proxies
icon_filename: "squid.png"
@@ -23,7 +24,7 @@ modules:
data_collection:
metrics_description: |
This collector monitors statistics about the Squid Clients and Servers, like bandwidth and requests.
- method_description: "It collects metrics from the endpoint where Squid exposes its `counters` data."
+ method_description: "It collects metrics from the `squid-internal-mgr/counters` endpoint."
supported_platforms:
include: []
exclude: []
@@ -32,101 +33,121 @@ modules:
description: ""
default_behavior:
auto_detection:
- description: "By default, this collector will try to autodetect where Squid presents its `counters` data, by trying various configurations."
+ description: |
+ By default, it detects Squid instances running on localhost that are listening on port 3128.
+ On startup, it tries to collect metrics from:
+
+        - http://127.0.0.1:3128
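+
+        As a quick manual check (a sketch, assuming the default address), you can fetch the counters endpoint yourself:
+
+        ```bash
+        curl http://127.0.0.1:3128/squid-internal-mgr/counters
+        ```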
limits:
description: ""
performance_impact:
description: ""
setup:
prerequisites:
- list:
- - title: Configure Squid's Cache Manager
- description: |
- Take a look at [Squid's official documentation](https://wiki.squid-cache.org/Features/CacheManager/Index#controlling-access-to-the-cache-manager) on how to configure access to the Cache Manager.
+ list: []
configuration:
file:
- name: "python.d/squid.conf"
+ name: "go.d/squid.conf"
options:
description: |
- There are 2 sections:
-
- * Global variables
- * One or more JOBS that can define multiple different instances to monitor.
-
- The following options can be defined globally: priority, penalty, autodetection_retry, update_every, but can also be defined per JOB to override the global values.
-
- Additionally, the following collapsed table contains all the options that can be configured inside a JOB definition.
-
- Every configuration JOB starts with a `job_name` value which will appear in the dashboard, unless a `name` parameter is specified.
+ The following options can be defined globally: update_every, autodetection_retry.
folding:
title: "Config options"
enabled: true
list:
- name: update_every
- description: Sets the default data collection frequency.
+ description: Data collection frequency.
default_value: 1
required: false
- - name: priority
- description: Controls the order of charts at the netdata dashboard.
- default_value: 60000
- required: false
- name: autodetection_retry
- description: Sets the job re-check interval in seconds.
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
default_value: 0
required: false
- - name: penalty
- description: Indicates whether to apply penalty to update_every in case of failures.
- default_value: yes
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:3128
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
required: false
- - name: name
- description: >
- Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works.
- default_value: "local"
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
required: false
- - name: host
- description: The host to connect to.
+ - name: password
+ description: Password for basic HTTP authentication.
default_value: ""
- required: true
- - name: port
- description: The port to connect to.
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
default_value: ""
- required: true
- - name: request
- description: The URL to request from Squid.
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
default_value: ""
- required: true
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: POST
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: false
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: false
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
examples:
folding:
enabled: true
- title: "Config"
+ title: Config
list:
- name: Basic
- description: A basic configuration example.
+ description: A basic example configuration.
folding:
enabled: false
config: |
- example_job_name:
- name: 'local'
- host: 'localhost'
- port: 3128
- request: 'cache_object://localhost:3128/counters'
+ jobs:
+ - name: local
+ url: http://127.0.0.1:3128
- name: Multi-instance
description: |
> **Note**: When you define multiple jobs, their names must be unique.
-
+
Collecting metrics from local and remote instances.
config: |
- local_job:
- name: 'local'
- host: '127.0.0.1'
- port: 3128
- request: 'cache_object://127.0.0.1:3128/counters'
-
- remote_job:
- name: 'remote'
- host: '192.0.2.1'
- port: 3128
- request: 'cache_object://192.0.2.1:3128/counters'
+ jobs:
+ - name: local
+ url: http://127.0.0.1:3128
+
+ - name: remote
+ url: http://192.0.2.1:3128
troubleshooting:
problems:
list: []
diff --git a/src/collectors/python.d.plugin/tomcat/README.md b/src/go/plugin/go.d/modules/tomcat/README.md
index 997090c35..997090c35 120000
--- a/src/collectors/python.d.plugin/tomcat/README.md
+++ b/src/go/plugin/go.d/modules/tomcat/README.md
diff --git a/src/collectors/python.d.plugin/tor/README.md b/src/go/plugin/go.d/modules/tor/README.md
index 7c20cd40a..7c20cd40a 120000
--- a/src/collectors/python.d.plugin/tor/README.md
+++ b/src/go/plugin/go.d/modules/tor/README.md
diff --git a/src/collectors/python.d.plugin/uwsgi/README.md b/src/go/plugin/go.d/modules/uwsgi/README.md
index 44b855949..44b855949 120000
--- a/src/collectors/python.d.plugin/uwsgi/README.md
+++ b/src/go/plugin/go.d/modules/uwsgi/README.md