summaryrefslogtreecommitdiffstats
path: root/collectors/python.d.plugin
diff options
context:
space:
mode:
Diffstat (limited to 'collectors/python.d.plugin')
-rw-r--r--collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py2
-rw-r--r--collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md9
-rw-r--r--collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md23
-rw-r--r--collectors/python.d.plugin/am2320/integrations/am2320.md11
-rw-r--r--collectors/python.d.plugin/beanstalk/integrations/beanstalk.md15
-rw-r--r--collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md13
-rw-r--r--collectors/python.d.plugin/boinc/integrations/boinc.md17
-rw-r--r--collectors/python.d.plugin/ceph/integrations/ceph.md17
-rw-r--r--collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md19
-rw-r--r--collectors/python.d.plugin/dovecot/integrations/dovecot.md17
-rw-r--r--collectors/python.d.plugin/example/integrations/example_collector.md17
-rw-r--r--collectors/python.d.plugin/exim/integrations/exim.md13
-rw-r--r--collectors/python.d.plugin/fail2ban/integrations/fail2ban.md19
-rw-r--r--collectors/python.d.plugin/gearman/integrations/gearman.md21
-rw-r--r--collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md21
-rw-r--r--collectors/python.d.plugin/haproxy/haproxy.chart.py10
-rw-r--r--collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md17
-rw-r--r--collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md15
-rw-r--r--collectors/python.d.plugin/icecast/integrations/icecast.md17
-rw-r--r--collectors/python.d.plugin/ipfs/integrations/ipfs.md17
-rw-r--r--collectors/python.d.plugin/litespeed/integrations/litespeed.md13
-rw-r--r--collectors/python.d.plugin/megacli/integrations/megacli.md11
-rw-r--r--collectors/python.d.plugin/megacli/megacli.chart.py6
-rw-r--r--collectors/python.d.plugin/memcached/integrations/memcached.md15
-rw-r--r--collectors/python.d.plugin/monit/integrations/monit.md17
-rw-r--r--collectors/python.d.plugin/nsd/integrations/name_server_daemon.md13
-rw-r--r--collectors/python.d.plugin/openldap/integrations/openldap.md27
-rw-r--r--collectors/python.d.plugin/oracledb/integrations/oracle_db.md19
-rw-r--r--collectors/python.d.plugin/pandas/integrations/pandas.md27
-rw-r--r--collectors/python.d.plugin/postfix/integrations/postfix.md11
-rw-r--r--collectors/python.d.plugin/puppet/integrations/puppet.md21
-rw-r--r--collectors/python.d.plugin/python.d.plugin.in25
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py6
-rw-r--r--collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py19
-rw-r--r--collectors/python.d.plugin/python_modules/bases/loggers.py182
-rw-r--r--collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md21
-rw-r--r--collectors/python.d.plugin/retroshare/integrations/retroshare.md13
-rw-r--r--collectors/python.d.plugin/riakkv/integrations/riakkv.md11
-rw-r--r--collectors/python.d.plugin/samba/integrations/samba.md9
-rw-r--r--collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md11
-rw-r--r--collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md17
-rw-r--r--collectors/python.d.plugin/smartd_log/smartd_log.chart.py20
-rw-r--r--collectors/python.d.plugin/spigotmc/integrations/spigotmc.md17
-rw-r--r--collectors/python.d.plugin/squid/integrations/squid.md17
-rw-r--r--collectors/python.d.plugin/tomcat/integrations/tomcat.md17
-rw-r--r--collectors/python.d.plugin/tor/integrations/tor.md17
-rw-r--r--collectors/python.d.plugin/uwsgi/integrations/uwsgi.md17
-rw-r--r--collectors/python.d.plugin/varnish/integrations/varnish.md13
-rw-r--r--collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md13
-rw-r--r--collectors/python.d.plugin/zscores/integrations/python.d_zscores.md29
50 files changed, 509 insertions, 455 deletions
diff --git a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
index bb59d88e1..1995ad681 100644
--- a/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
+++ b/collectors/python.d.plugin/adaptec_raid/adaptec_raid.chart.py
@@ -87,7 +87,7 @@ def find_pds(d):
elif row.startswith('Temperature'):
v = row.split(':')[-1].split()[0]
pd.temperature = v
- elif row.startswith('NCQ status'):
+ elif row.startswith(('NCQ status', 'Device Phy')) or not row:
if pd.id and pd.state and pd.smart_warnings:
pds.append(pd)
pd = PD()
diff --git a/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md b/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md
index 59e359d0d..13d22ba54 100644
--- a/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md
+++ b/collectors/python.d.plugin/adaptec_raid/integrations/adaptecraid.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "AdaptecRAID"
learn_status: "Published"
learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -150,10 +151,10 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
</details>
diff --git a/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md b/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
index 95e4a4a3b..9fb69878a 100644
--- a/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
+++ b/collectors/python.d.plugin/alarms/integrations/netdata_agent_alarms.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Netdata Agent alarms"
learn_status: "Published"
learn_rel_path: "Data Collection/Other"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -110,17 +111,17 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | True |
-| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2} | True |
-| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | False | True |
-| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | True |
-| alarm_contains_words | A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all. | | True |
-| alarm_excludes_words | A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None. | | True |
-| update_every | Sets the default data collection frequency. | 10 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| url | Netdata agent alarms endpoint to collect from. Can be local or remote so long as reachable by agent. | http://127.0.0.1:19999/api/v1/alarms?all | yes |
+| status_map | Mapping of alarm status to integer number that will be the metric value collected. | {"CLEAR": 0, "WARNING": 1, "CRITICAL": 2} | yes |
+| collect_alarm_values | set to true to include a chart with calculated alarm values over time. | no | yes |
+| alarm_status_chart_type | define the type of chart for plotting status over time e.g. 'line' or 'stacked'. | line | yes |
+| alarm_contains_words | A "," separated list of words you want to filter alarm names for. For example 'cpu,load' would filter for only alarms with "cpu" or "load" in alarm name. Default includes all. | | yes |
+| alarm_excludes_words | A "," separated list of words you want to exclude based on alarm name. For example 'cpu,load' would exclude all alarms with "cpu" or "load" in alarm name. Default excludes None. | | yes |
+| update_every | Sets the default data collection frequency. | 10 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/am2320/integrations/am2320.md b/collectors/python.d.plugin/am2320/integrations/am2320.md
index 9b41a8fd6..72b351eb5 100644
--- a/collectors/python.d.plugin/am2320/integrations/am2320.md
+++ b/collectors/python.d.plugin/am2320/integrations/am2320.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "AM2320"
learn_status: "Published"
learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -130,11 +131,11 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md b/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
index cf2f0dac1..5095c0c28 100644
--- a/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
+++ b/collectors/python.d.plugin/beanstalk/integrations/beanstalk.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Beanstalk"
learn_status: "Published"
learn_rel_path: "Data Collection/Message Brokers"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -141,13 +142,13 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| host | IP or URL to a beanstalk service. | 127.0.0.1 | False |
-| port | Port to the IP or URL to a beanstalk service. | 11300 | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | IP or URL to a beanstalk service. | 127.0.0.1 | no |
+| port | Port to the IP or URL to a beanstalk service. | 11300 | no |
</details>
diff --git a/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md b/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md
index cc847272d..163f8282c 100644
--- a/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md
+++ b/collectors/python.d.plugin/bind_rndc/integrations/isc_bind_rndc.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "ISC Bind (RNDC)"
learn_status: "Published"
learn_rel_path: "Data Collection/DNS and DHCP Servers"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -163,12 +164,12 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| named_stats_path | Path to the named stats, after being dumped by `nrdc` | /var/log/bind/named.stats | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| named_stats_path | Path to the named stats, after being dumped by `nrdc` | /var/log/bind/named.stats | no |
</details>
diff --git a/collectors/python.d.plugin/boinc/integrations/boinc.md b/collectors/python.d.plugin/boinc/integrations/boinc.md
index 961f79537..d6874d455 100644
--- a/collectors/python.d.plugin/boinc/integrations/boinc.md
+++ b/collectors/python.d.plugin/boinc/integrations/boinc.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "BOINC"
learn_status: "Published"
learn_rel_path: "Data Collection/Distributed Computing Systems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -123,14 +124,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| hostname | Define a hostname where boinc is running. | localhost | False |
-| port | The port of boinc RPC interface. | | False |
-| password | Provide a password to connect to a boinc RPC interface. | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| hostname | Define a hostname where boinc is running. | localhost | no |
+| port | The port of boinc RPC interface. | | no |
+| password | Provide a password to connect to a boinc RPC interface. | | no |
</details>
diff --git a/collectors/python.d.plugin/ceph/integrations/ceph.md b/collectors/python.d.plugin/ceph/integrations/ceph.md
index 051121148..cfda01fbe 100644
--- a/collectors/python.d.plugin/ceph/integrations/ceph.md
+++ b/collectors/python.d.plugin/ceph/integrations/ceph.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Ceph"
learn_status: "Published"
learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -139,14 +140,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| config_file | Ceph config file | | True |
-| keyring_file | Ceph keyring file. netdata user must be added into ceph group and keyring file must be read group permission. | | True |
-| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| config_file | Ceph config file | | yes |
+| keyring_file | Ceph keyring file. netdata user must be added into ceph group and keyring file must be read group permission. | | yes |
+| rados_id | A rados user id to use for connecting to the Ceph cluster. | admin | no |
</details>
diff --git a/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md b/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
index 2265d9620..c338c9374 100644
--- a/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
+++ b/collectors/python.d.plugin/changefinder/integrations/python.d_changefinder.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "python.d changefinder"
learn_status: "Published"
learn_rel_path: "Data Collection/Other"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -144,15 +145,15 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| charts_regex | what charts to pull data for - A regex like `system\..*/` or `system\..*/apps.cpu/apps.mem` etc. | system\..* | True |
-| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | False |
-| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | True |
-| cf_r | default parameters that can be passed to the changefinder library. | 0.5 | False |
-| cf_order | default parameters that can be passed to the changefinder library. | 1 | False |
-| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | False |
-| cf_threshold | the percentile above which scores will be flagged. | 99 | False |
-| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | False |
-| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | False | False |
+| charts_regex | what charts to pull data for - A regex like `system\..*/` or `system\..*/apps.cpu/apps.mem` etc. | system\..* | yes |
+| charts_to_exclude | charts to exclude, useful if you would like to exclude some specific charts. note: should be a ',' separated string like 'chart.name,chart.name'. | | no |
+| mode | get ChangeFinder scores 'per_dim' or 'per_chart'. | per_chart | yes |
+| cf_r | default parameters that can be passed to the changefinder library. | 0.5 | no |
+| cf_order | default parameters that can be passed to the changefinder library. | 1 | no |
+| cf_smooth | default parameters that can be passed to the changefinder library. | 15 | no |
+| cf_threshold | the percentile above which scores will be flagged. | 99 | no |
+| n_score_samples | the number of recent scores to use when calculating the percentile of the changefinder score. | 14400 | no |
+| show_scores | set to true if you also want to chart the percentile scores in addition to the flags. (mainly useful for debugging or if you want to dive deeper on how the scores are evolving over time) | no | no |
</details>
diff --git a/collectors/python.d.plugin/dovecot/integrations/dovecot.md b/collectors/python.d.plugin/dovecot/integrations/dovecot.md
index 4057a5b6c..4e7952765 100644
--- a/collectors/python.d.plugin/dovecot/integrations/dovecot.md
+++ b/collectors/python.d.plugin/dovecot/integrations/dovecot.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Dovecot"
learn_status: "Published"
learn_rel_path: "Data Collection/Mail Servers"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -123,14 +124,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| socket | Use this socket to communicate with Devcot | /var/run/dovecot/stats | False |
-| host | Instead of using a socket, you can point the collector to an ip for devcot statistics. | | False |
-| port | Used in combination with host, configures the port devcot listens to. | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| socket | Use this socket to communicate with Devcot | /var/run/dovecot/stats | no |
+| host | Instead of using a socket, you can point the collector to an ip for devcot statistics. | | no |
+| port | Used in combination with host, configures the port devcot listens to. | | no |
</details>
diff --git a/collectors/python.d.plugin/example/integrations/example_collector.md b/collectors/python.d.plugin/example/integrations/example_collector.md
index 44b405a7d..7dded67ba 100644
--- a/collectors/python.d.plugin/example/integrations/example_collector.md
+++ b/collectors/python.d.plugin/example/integrations/example_collector.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Example collector"
learn_status: "Published"
learn_rel_path: "Data Collection/Other"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -110,14 +111,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| num_lines | The number of lines to create. | 4 | False |
-| lower | The lower bound of numbers to randomly sample from. | 0 | False |
-| upper | The upper bound of numbers to randomly sample from. | 100 | False |
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| num_lines | The number of lines to create. | 4 | no |
+| lower | The lower bound of numbers to randomly sample from. | 0 | no |
+| upper | The upper bound of numbers to randomly sample from. | 100 | no |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/exim/integrations/exim.md b/collectors/python.d.plugin/exim/integrations/exim.md
index 328d17870..f0ae33d3e 100644
--- a/collectors/python.d.plugin/exim/integrations/exim.md
+++ b/collectors/python.d.plugin/exim/integrations/exim.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Exim"
learn_status: "Published"
learn_rel_path: "Data Collection/Mail Servers"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -129,12 +130,12 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| command | Path and command to the `exim` binary | exim -bpc | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| command | Path and command to the `exim` binary | exim -bpc | no |
</details>
diff --git a/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md b/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md
index 64bfe21ba..a7116be5e 100644
--- a/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md
+++ b/collectors/python.d.plugin/fail2ban/integrations/fail2ban.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Fail2ban"
learn_status: "Published"
learn_rel_path: "Data Collection/Authentication and Authorization"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -149,15 +150,15 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| log_path | path to fail2ban.log. | /var/log/fail2ban.log | False |
-| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | False |
-| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | False |
-| exclude | jails you want to exclude from autodetection. | | False |
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| log_path | path to fail2ban.log. | /var/log/fail2ban.log | no |
+| conf_path | path to jail.local/jail.conf. | /etc/fail2ban/jail.local | no |
+| conf_dir | path to jail.d/. | /etc/fail2ban/jail.d/ | no |
+| exclude | jails you want to exclude from autodetection. | | no |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/gearman/integrations/gearman.md b/collectors/python.d.plugin/gearman/integrations/gearman.md
index f988e7448..3923d1401 100644
--- a/collectors/python.d.plugin/gearman/integrations/gearman.md
+++ b/collectors/python.d.plugin/gearman/integrations/gearman.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Gearman"
learn_status: "Published"
learn_rel_path: "Data Collection/Distributed Computing Systems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -129,16 +130,16 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| host | URL or IP where gearman is running. | localhost | False |
-| port | Port of URL or IP where gearman is running. | 4730 | False |
-| tls | Use tls to connect to gearman. | false | False |
-| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | False |
-| key | Provide a key file if needed to connect to a TLS gearman instance. | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | URL or IP where gearman is running. | localhost | no |
+| port | Port of URL or IP where gearman is running. | 4730 | no |
+| tls | Use tls to connect to gearman. | false | no |
+| cert | Provide a certificate file if needed to connect to a TLS gearman instance. | | no |
+| key | Provide a key file if needed to connect to a TLS gearman instance. | | no |
</details>
diff --git a/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md b/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
index be4db4b70..8d61fa2ae 100644
--- a/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
+++ b/collectors/python.d.plugin/go_expvar/integrations/go_applications_expvar.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Go applications (EXPVAR)"
learn_status: "Published"
learn_rel_path: "Data Collection/APM"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -195,16 +196,16 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | True |
-| user | If the URL is password protected, this is the username to use. | | False |
-| pass | If the URL is password protected, this is the password to use. | | False |
-| collect_memstats | Enables charts for Go runtime's memory statistics. | | False |
-| extra_charts | Defines extra data/charts to monitor, please see the example below. | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| url | the URL and port of the expvar endpoint. Please include the whole path of the endpoint, as the expvar handler can be installed in a non-standard location. | | yes |
+| user | If the URL is password protected, this is the username to use. | | no |
+| pass | If the URL is password protected, this is the password to use. | | no |
+| collect_memstats | Enables charts for Go runtime's memory statistics. | | no |
+| extra_charts | Defines extra data/charts to monitor, please see the example below. | | no |
</details>
diff --git a/collectors/python.d.plugin/haproxy/haproxy.chart.py b/collectors/python.d.plugin/haproxy/haproxy.chart.py
index 6f94c9a07..f412febb7 100644
--- a/collectors/python.d.plugin/haproxy/haproxy.chart.py
+++ b/collectors/python.d.plugin/haproxy/haproxy.chart.py
@@ -44,6 +44,7 @@ ORDER = [
'bctime',
'health_sup',
'health_sdown',
+ 'health_smaint',
'health_bdown',
'health_idle'
]
@@ -167,6 +168,10 @@ CHARTS = {
'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'],
'lines': []
},
+ 'health_smaint': {
+ 'options': [None, 'Backend Servers In MAINT State', 'maintenance servers', 'health', 'haproxy_hs.maint', 'line'],
+ 'lines': []
+ },
'health_bdown': {
'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
'lines': []
@@ -267,6 +272,8 @@ class Service(UrlService, SocketService):
if server_status(server, name, 'UP')])
stat_data['hsdown_' + idx] = len([server for server in self.data['servers']
if server_status(server, name, 'DOWN')])
+ stat_data['hsmaint_' + idx] = len([server for server in self.data['servers']
+ if server_status(server, name, 'MAINT')])
stat_data['hbdown_' + idx] = 1 if backend.get('status') == 'DOWN' else 0
for metric in BACKEND_METRICS:
stat_data['_'.join(['backend', metric, idx])] = backend.get(metric) or 0
@@ -321,6 +328,7 @@ class Service(UrlService, SocketService):
BACKEND_METRICS[metric]['divisor']])
self.definitions['health_sup']['lines'].append(['hsup_' + idx, name, 'absolute'])
self.definitions['health_sdown']['lines'].append(['hsdown_' + idx, name, 'absolute'])
+ self.definitions['health_smaint']['lines'].append(['hsmaint_' + idx, name, 'absolute'])
self.definitions['health_bdown']['lines'].append(['hbdown_' + idx, name, 'absolute'])
@@ -352,7 +360,7 @@ def parse_data_(data):
def server_status(server, backend_name, status='DOWN'):
- return server.get('# pxname') == backend_name and server.get('status') == status
+ return server.get('# pxname') == backend_name and server.get('status').partition(' ')[0] == status
def url_remove_params(url):
diff --git a/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md b/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md
index 29512bba3..4a1504f07 100644
--- a/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md
+++ b/collectors/python.d.plugin/hddtemp/integrations/hdd_temperature.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "HDD temperature"
learn_status: "Published"
learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -121,14 +122,14 @@ By default this collector will try to autodetect disks (autodetection works only
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | False |
-| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | False |
-| host | The IP or HOSTNAME to connect to. | localhost | True |
-| port | The port to connect to. | 7634 | False |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
+| devices | Array of desired disks to detect, in case their name doesn't start with `sd`. | | no |
+| host | The IP or HOSTNAME to connect to. | localhost | yes |
+| port | The port to connect to. | 7634 | no |
</details>
diff --git a/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md b/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md
index 8ec7a5c5c..d46cc9065 100644
--- a/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md
+++ b/collectors/python.d.plugin/hpssa/integrations/hp_smart_storage_arrays.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "HP Smart Storage Arrays"
learn_status: "Published"
learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -152,13 +153,13 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | False |
-| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| ssacli_path | Path to the `ssacli` command line utility. Configure this if `ssacli` is not in the $PATH | | no |
+| use_sudo | Whether or not to use `sudo` to execute `ssacli` | True | no |
</details>
diff --git a/collectors/python.d.plugin/icecast/integrations/icecast.md b/collectors/python.d.plugin/icecast/integrations/icecast.md
index 06c317864..12d7d59ee 100644
--- a/collectors/python.d.plugin/icecast/integrations/icecast.md
+++ b/collectors/python.d.plugin/icecast/integrations/icecast.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Icecast"
learn_status: "Published"
learn_rel_path: "Data Collection/Media Services"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -112,14 +113,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | False |
-| user | Username to use to connect to `url` if it's password protected. | | False |
-| pass | Password to use to connect to `url` if it's password protected. | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| url | The URL (and port) to the icecast server. Needs to also include `/status-json.xsl` | http://localhost:8443/status-json.xsl | no |
+| user | Username to use to connect to `url` if it's password protected. | | no |
+| pass | Password to use to connect to `url` if it's password protected. | | no |
</details>
diff --git a/collectors/python.d.plugin/ipfs/integrations/ipfs.md b/collectors/python.d.plugin/ipfs/integrations/ipfs.md
index c43c27b34..77dc745aa 100644
--- a/collectors/python.d.plugin/ipfs/integrations/ipfs.md
+++ b/collectors/python.d.plugin/ipfs/integrations/ipfs.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "IPFS"
learn_status: "Published"
learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -121,14 +122,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | False |
-| url | URL to the IPFS API | no | True |
-| repoapi | Collect repo metrics. | no | False |
-| pinapi | Set status of IPFS pinned object polling. | no | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
+| url | URL to the IPFS API | no | yes |
+| repoapi | Collect repo metrics. | no | no |
+| pinapi | Set status of IPFS pinned object polling. | no | no |
</details>
diff --git a/collectors/python.d.plugin/litespeed/integrations/litespeed.md b/collectors/python.d.plugin/litespeed/integrations/litespeed.md
index 511c112e9..87f2d0b12 100644
--- a/collectors/python.d.plugin/litespeed/integrations/litespeed.md
+++ b/collectors/python.d.plugin/litespeed/integrations/litespeed.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Litespeed"
learn_status: "Published"
learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -117,12 +118,12 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| path | Use a different path than the default, where the lightspeed stats files reside. | /tmp/lshttpd/ | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| path | Use a different path than the default, where the lightspeed stats files reside. | /tmp/lshttpd/ | no |
</details>
diff --git a/collectors/python.d.plugin/megacli/integrations/megacli.md b/collectors/python.d.plugin/megacli/integrations/megacli.md
index bb3bdf6f2..0c4af78a9 100644
--- a/collectors/python.d.plugin/megacli/integrations/megacli.md
+++ b/collectors/python.d.plugin/megacli/integrations/megacli.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "MegaCLI"
learn_status: "Published"
learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -165,11 +166,11 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| do_battery | default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`). | no | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| do_battery | default is no. Battery stats (adds additional call to megacli `megacli -AdpBbuCmd -a0`). | no | no |
</details>
diff --git a/collectors/python.d.plugin/megacli/megacli.chart.py b/collectors/python.d.plugin/megacli/megacli.chart.py
index ef35ff63f..8222092a8 100644
--- a/collectors/python.d.plugin/megacli/megacli.chart.py
+++ b/collectors/python.d.plugin/megacli/megacli.chart.py
@@ -91,7 +91,7 @@ def battery_charts(bats):
RE_ADAPTER = re.compile(
- r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z]+)'
+ r'Adapter #([0-9]+) State(?:\s+)?: ([a-zA-Z ]+)'
)
RE_VD = re.compile(
@@ -124,14 +124,14 @@ def find_batteries(d):
class Adapter:
def __init__(self, n, state):
self.id = n
- self.state = int(state == 'Degraded')
+ # TODO: Rewrite all of this
+ self.state = int(state in ("Partially Degraded", "Degraded", "Failed"))
def data(self):
return {
'adapter_{0}_degraded'.format(self.id): self.state,
}
-
class PD:
def __init__(self, n, media_err, predict_fail):
self.id = n
diff --git a/collectors/python.d.plugin/memcached/integrations/memcached.md b/collectors/python.d.plugin/memcached/integrations/memcached.md
index 012758304..113b86c8c 100644
--- a/collectors/python.d.plugin/memcached/integrations/memcached.md
+++ b/collectors/python.d.plugin/memcached/integrations/memcached.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Memcached"
learn_status: "Published"
learn_rel_path: "Data Collection/Databases"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -130,13 +131,13 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| host | the host to connect to. | 127.0.0.1 | False |
-| port | the port to connect to. | 11211 | False |
-| update_every | Sets the default data collection frequency. | 10 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| host | the host to connect to. | 127.0.0.1 | no |
+| port | the port to connect to. | 11211 | no |
+| update_every | Sets the default data collection frequency. | 10 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/monit/integrations/monit.md b/collectors/python.d.plugin/monit/integrations/monit.md
index ecf522f84..18219141d 100644
--- a/collectors/python.d.plugin/monit/integrations/monit.md
+++ b/collectors/python.d.plugin/monit/integrations/monit.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Monit"
learn_status: "Published"
learn_rel_path: "Data Collection/Synthetic Checks"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -122,14 +123,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | False |
-| url | The URL to fetch Monit's metrics. | http://localhost:2812 | True |
-| user | Username in case the URL is password protected. | | False |
-| pass | Password in case the URL is password protected. | | False |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
+| url | The URL to fetch Monit's metrics. | http://localhost:2812 | yes |
+| user | Username in case the URL is password protected. | | no |
+| pass | Password in case the URL is password protected. | | no |
</details>
diff --git a/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md b/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
index 8ed86bdf9..0e66c44eb 100644
--- a/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
+++ b/collectors/python.d.plugin/nsd/integrations/name_server_daemon.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Name Server Daemon"
learn_status: "Published"
learn_rel_path: "Data Collection/DNS and DHCP Servers"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -146,12 +147,12 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 30 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| command | The command to run | nsd-control stats_noreset | False |
+| update_every | Sets the default data collection frequency. | 30 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| command | The command to run | nsd-control stats_noreset | no |
</details>
diff --git a/collectors/python.d.plugin/openldap/integrations/openldap.md b/collectors/python.d.plugin/openldap/integrations/openldap.md
index 375132edb..a9480a490 100644
--- a/collectors/python.d.plugin/openldap/integrations/openldap.md
+++ b/collectors/python.d.plugin/openldap/integrations/openldap.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "OpenLDAP"
learn_status: "Published"
learn_rel_path: "Data Collection/Authentication and Authorization"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -152,19 +153,19 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| username | The bind user with right to access monitor statistics | | True |
-| password | The password for the binded user | | True |
-| server | The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. | | True |
-| port | The listening port of the LDAP server. Change to 636 port in case of TLS connection. | 389 | True |
-| use_tls | Make True if a TLS connection is used over ldaps:// | False | False |
-| use_start_tls | Make True if a TLS connection is used over ldap:// | False | False |
-| cert_check | False if you want to ignore certificate check | True | True |
-| timeout | Seconds to timeout if no connection exist | | True |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| username | The bind user with right to access monitor statistics | | yes |
+| password | The password for the bound user | | yes |
+| server | The listening address of the LDAP server. In case of TLS, use the hostname which the certificate is published for. | | yes |
+| port | The listening port of the LDAP server. Change to 636 port in case of TLS connection. | 389 | yes |
+| use_tls | Make True if a TLS connection is used over ldaps:// | no | no |
+| use_start_tls | Make True if a TLS connection is used over ldap:// | no | no |
+| cert_check | Set to False if you want to skip certificate verification | True | yes |
+| timeout | Seconds to wait before timing out if no connection exists | | yes |
</details>
diff --git a/collectors/python.d.plugin/oracledb/integrations/oracle_db.md b/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
index cb6637e8a..30557c021 100644
--- a/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
+++ b/collectors/python.d.plugin/oracledb/integrations/oracle_db.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Oracle DB"
learn_status: "Published"
learn_rel_path: "Data Collection/Databases"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -160,15 +161,15 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| user | The username for the user account. | no | True |
-| password | The password for the user account. | no | True |
-| server | The IP address or hostname (and port) of the Oracle Database Server. | no | True |
-| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | True |
-| protocol | one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic or encrypted network traffic | no | True |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| user | The username for the user account. | no | yes |
+| password | The password for the user account. | no | yes |
+| server | The IP address or hostname (and port) of the Oracle Database Server. | no | yes |
+| service | The Oracle Database service name. To view the services available on your server run this query, `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`. | no | yes |
+| protocol | One of the strings "tcp" or "tcps", indicating whether to use unencrypted or encrypted network traffic | no | yes |
</details>
diff --git a/collectors/python.d.plugin/pandas/integrations/pandas.md b/collectors/python.d.plugin/pandas/integrations/pandas.md
index d5da2f262..83c5c66b1 100644
--- a/collectors/python.d.plugin/pandas/integrations/pandas.md
+++ b/collectors/python.d.plugin/pandas/integrations/pandas.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Pandas"
learn_status: "Published"
learn_rel_path: "Data Collection/Generic Data Collection"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -132,19 +133,19 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| chart_configs | an array of chart configuration dictionaries | [] | True |
-| chart_configs.name | name of the chart to be displayed in the dashboard. | None | True |
-| chart_configs.title | title of the chart to be displayed in the dashboard. | None | True |
-| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | True |
-| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | True |
-| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | True |
-| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | True |
-| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | True |
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| chart_configs | an array of chart configuration dictionaries | [] | yes |
+| chart_configs.name | name of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.title | title of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.family | [family](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#families) of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.context | [context](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/interact-new-charts.md#contexts) of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.type | the type of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.units | the units of the chart to be displayed in the dashboard. | None | yes |
+| chart_configs.df_steps | a series of pandas operations (one per line) that each returns a dataframe. | None | yes |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/postfix/integrations/postfix.md b/collectors/python.d.plugin/postfix/integrations/postfix.md
index 7113d7ddd..2bb99922c 100644
--- a/collectors/python.d.plugin/postfix/integrations/postfix.md
+++ b/collectors/python.d.plugin/postfix/integrations/postfix.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Postfix"
learn_status: "Published"
learn_rel_path: "Data Collection/Mail Servers"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -108,11 +109,11 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/puppet/integrations/puppet.md b/collectors/python.d.plugin/puppet/integrations/puppet.md
index be68749a3..ca190b576 100644
--- a/collectors/python.d.plugin/puppet/integrations/puppet.md
+++ b/collectors/python.d.plugin/puppet/integrations/puppet.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Puppet"
learn_status: "Published"
learn_rel_path: "Data Collection/CICD Platforms"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -121,16 +122,16 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | True |
-| tls_verify | Control HTTPS server certificate verification. | False | False |
-| tls_ca_file | Optional CA (bundle) file to use | | False |
-| tls_cert_file | Optional client certificate file | | False |
-| tls_key_file | Optional client key file | | False |
-| update_every | Sets the default data collection frequency. | 30 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| url | HTTP or HTTPS URL, exact Fully Qualified Domain Name of the node should be used. | https://fqdn.example.com:8081 | yes |
+| tls_verify | Control HTTPS server certificate verification. | False | no |
+| tls_ca_file | Optional CA (bundle) file to use | | no |
+| tls_cert_file | Optional client certificate file | | no |
+| tls_key_file | Optional client key file | | no |
+| update_every | Sets the default data collection frequency. | 30 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/python.d.plugin.in b/collectors/python.d.plugin/python.d.plugin.in
index bc171e032..86fea209c 100644
--- a/collectors/python.d.plugin/python.d.plugin.in
+++ b/collectors/python.d.plugin/python.d.plugin.in
@@ -222,8 +222,11 @@ class ModuleConfig:
def __init__(self, name, config=None):
self.name = name
self.config = config or OrderedDict()
+ self.is_stock = False
def load(self, abs_path):
+ if not IS_ATTY:
+ self.is_stock = abs_path.startswith(DIRS.modules_stock_config)
self.config.update(load_config(abs_path) or dict())
def defaults(self):
@@ -242,6 +245,7 @@ class ModuleConfig:
config = OrderedDict()
config.update(job_config)
config['job_name'] = job_name
+ config['__is_stock'] = self.is_stock
for k, v in self.defaults().items():
config.setdefault(k, v)
@@ -309,7 +313,8 @@ class JobsConfigsBuilder:
return None
configs = config.create_jobs()
- self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs)))
+ if not config.is_stock:
+ self.log.info("[{0}] built {1} job(s) configs".format(module_name, len(configs)))
self.apply_defaults(configs, self.module_defaults)
self.apply_defaults(configs, self.job_defaults)
@@ -338,6 +343,7 @@ class Job(threading.Thread):
self.autodetection_retry = config['autodetection_retry']
self.checks = self.inf
self.job = None
+ self.is_stock = config.get('__is_stock', False)
self.status = JOB_STATUS_INIT
def is_inited(self):
@@ -350,8 +356,14 @@ class Job(threading.Thread):
return self.job.name
def check(self):
+ if self.is_stock:
+ self.job.logger.mute()
+
ok = self.job.check()
+
+ self.job.logger.unmute()
self.checks -= self.checks != self.inf and not ok
+
return ok
def create(self):
@@ -503,7 +515,6 @@ class FileLockRegistry:
name = "docker" + name[7:]
return name
-
def register(self, name):
name = self.rename(name)
if name in self.locks:
@@ -685,12 +696,14 @@ class Plugin:
try:
ok = job.check()
except Exception as error:
- self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
- job.module_name, job.real_name, repr(error)))
+ if not job.is_stock:
+ self.log.warning("{0}[{1}] : unhandled exception on check : {2}, skipping the job".format(
+ job.module_name, job.real_name, repr(error)))
job.status = JOB_STATUS_DROPPED
continue
if not ok:
- self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name))
+ if not job.is_stock:
+ self.log.info('{0}[{1}] : check failed'.format(job.module_name, job.real_name))
job.status = JOB_STATUS_RECOVERING if job.need_to_recheck() else JOB_STATUS_DROPPED
continue
self.log.info('{0}[{1}] : check success'.format(job.module_name, job.real_name))
@@ -876,7 +889,7 @@ def main():
cmd = parse_command_line()
log = PythonDLogger()
- level = os.getenv('NETDATA_LOG_SEVERITY_LEVEL') or str()
+ level = os.getenv('NETDATA_LOG_LEVEL') or str()
level = level.lower()
if level == 'debug':
log.logger.severity = 'DEBUG'
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
index a7acc23b6..3f122e1d9 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/SimpleService.py
@@ -8,7 +8,7 @@ import os
from bases.charts import Charts, ChartError, create_runtime_chart
from bases.collection import safe_print
-from bases.loggers import PythonDLimitedLogger
+from bases.loggers import PythonDLogger
from third_party.monotonic import monotonic
from time import sleep, time
@@ -62,7 +62,7 @@ def clean_module_name(name):
return name
-class SimpleService(PythonDLimitedLogger, object):
+class SimpleService(PythonDLogger, object):
"""
Prototype of Service class.
Implemented basic functionality to run jobs by `python.d.plugin`
@@ -73,7 +73,7 @@ class SimpleService(PythonDLimitedLogger, object):
:param configuration: <dict>
:param name: <str>
"""
- PythonDLimitedLogger.__init__(self)
+ PythonDLogger.__init__(self)
self.configuration = configuration
self.order = list()
self.definitions = dict()
diff --git a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
index 1faf036a4..76129d376 100644
--- a/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
+++ b/collectors/python.d.plugin/python_modules/bases/FrameworkServices/UrlService.py
@@ -6,8 +6,6 @@
import urllib3
-from distutils.version import StrictVersion as version
-
from bases.FrameworkServices.SimpleService import SimpleService
try:
@@ -15,28 +13,11 @@ try:
except AttributeError:
pass
-# https://github.com/urllib3/urllib3/blob/master/CHANGES.rst#19-2014-07-04
-# New retry logic and urllib3.util.retry.Retry configuration object. (Issue https://github.com/urllib3/urllib3/pull/326)
-URLLIB3_MIN_REQUIRED_VERSION = '1.9'
URLLIB3_VERSION = urllib3.__version__
URLLIB3 = 'urllib3'
-
-def version_check():
- if version(URLLIB3_VERSION) >= version(URLLIB3_MIN_REQUIRED_VERSION):
- return
-
- err = '{0} version: {1}, minimum required version: {2}, please upgrade'.format(
- URLLIB3,
- URLLIB3_VERSION,
- URLLIB3_MIN_REQUIRED_VERSION,
- )
- raise Exception(err)
-
-
class UrlService(SimpleService):
def __init__(self, configuration=None, name=None):
- version_check()
SimpleService.__init__(self, configuration=configuration, name=name)
self.debug("{0} version: {1}".format(URLLIB3, URLLIB3_VERSION))
self.url = self.configuration.get('url')
diff --git a/collectors/python.d.plugin/python_modules/bases/loggers.py b/collectors/python.d.plugin/python_modules/bases/loggers.py
index 47f196a6d..7ae8ab0c1 100644
--- a/collectors/python.d.plugin/python_modules/bases/loggers.py
+++ b/collectors/python.d.plugin/python_modules/bases/loggers.py
@@ -4,6 +4,8 @@
# SPDX-License-Identifier: GPL-3.0-or-later
import logging
+import os
+import stat
import traceback
from sys import exc_info
@@ -15,39 +17,46 @@ except ImportError:
from bases.collection import on_try_except_finally, unicode_str
+LOGGING_LEVELS = {
+ 'CRITICAL': 50,
+ 'ERROR': 40,
+ 'WARNING': 30,
+ 'INFO': 20,
+ 'DEBUG': 10,
+ 'NOTSET': 0,
+}
-LOGGING_LEVELS = {'CRITICAL': 50,
- 'ERROR': 40,
- 'WARNING': 30,
- 'INFO': 20,
- 'DEBUG': 10,
- 'NOTSET': 0}
-DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
-DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+def is_stderr_connected_to_journal():
+ journal_stream = os.environ.get("JOURNAL_STREAM")
+ if not journal_stream:
+ return False
-PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s'
-PYTHON_D_LOG_NAME = 'python.d'
+ colon_index = journal_stream.find(":")
+ if colon_index <= 0:
+ return False
+ device, inode = journal_stream[:colon_index], journal_stream[colon_index + 1:]
-def limiter(log_max_count=30, allowed_in_seconds=60):
- def on_decorator(func):
+ try:
+ device_number, inode_number = os.fstat(2)[stat.ST_DEV], os.fstat(2)[stat.ST_INO]
+ except OSError:
+ return False
- def on_call(*args):
- current_time = args[0]._runtime_counters.start_mono
- lc = args[0]._logger_counters
+ return str(device_number) == device and str(inode_number) == inode
- if lc.logged and lc.logged % log_max_count == 0:
- if current_time - lc.time_to_compare <= allowed_in_seconds:
- lc.dropped += 1
- return
- lc.time_to_compare = current_time
- lc.logged += 1
- func(*args)
+is_journal = is_stderr_connected_to_journal()
+
+DEFAULT_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s : %(message)s'
+PYTHON_D_LOG_LINE_FORMAT = '%(asctime)s: %(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s'
+
+if is_journal:
+ DEFAULT_LOG_LINE_FORMAT = '%(name)s %(levelname)s : %(message)s'
+ PYTHON_D_LOG_LINE_FORMAT = '%(name)s %(levelname)s: %(module_name)s[%(job_name)s] : %(message)s '
- return on_call
- return on_decorator
+DEFAULT_LOG_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
+PYTHON_D_LOG_NAME = 'python.d'
def add_traceback(func):
@@ -66,27 +75,16 @@ def add_traceback(func):
return on_call
-class LoggerCounters:
- def __init__(self):
- self.logged = 0
- self.dropped = 0
- self.time_to_compare = time()
-
- def __repr__(self):
- return 'LoggerCounter(logged: {logged}, dropped: {dropped})'.format(logged=self.logged,
- dropped=self.dropped)
-
-
class BaseLogger(object):
- def __init__(self, logger_name, log_fmt=DEFAULT_LOG_LINE_FORMAT, date_fmt=DEFAULT_LOG_TIME_FORMAT,
- handler=logging.StreamHandler):
- """
- :param logger_name: <str>
- :param log_fmt: <str>
- :param date_fmt: <str>
- :param handler: <logging handler>
- """
+ def __init__(
+ self,
+ logger_name,
+ log_fmt=DEFAULT_LOG_LINE_FORMAT,
+ date_fmt=DEFAULT_LOG_TIME_FORMAT,
+ handler=logging.StreamHandler,
+ ):
self.logger = logging.getLogger(logger_name)
+ self._muted = False
if not self.has_handlers():
self.severity = 'INFO'
self.logger.addHandler(handler())
@@ -96,11 +94,6 @@ class BaseLogger(object):
return '<Logger: {name})>'.format(name=self.logger.name)
def set_formatter(self, fmt, date_fmt=DEFAULT_LOG_TIME_FORMAT):
- """
- :param fmt: <str>
- :param date_fmt: <str>
- :return:
- """
if self.has_handlers():
self.logger.handlers[0].setFormatter(logging.Formatter(fmt=fmt, datefmt=date_fmt))
@@ -113,43 +106,48 @@ class BaseLogger(object):
@severity.setter
def severity(self, level):
- """
- :param level: <str> or <int>
- :return:
- """
if level in LOGGING_LEVELS:
self.logger.setLevel(LOGGING_LEVELS[level])
+ def _log(self, level, *msg, **kwargs):
+ if not self._muted:
+ self.logger.log(level, ' '.join(map(unicode_str, msg)), **kwargs)
+
def debug(self, *msg, **kwargs):
- self.logger.debug(' '.join(map(unicode_str, msg)), **kwargs)
+ self._log(logging.DEBUG, *msg, **kwargs)
def info(self, *msg, **kwargs):
- self.logger.info(' '.join(map(unicode_str, msg)), **kwargs)
+ self._log(logging.INFO, *msg, **kwargs)
def warning(self, *msg, **kwargs):
- self.logger.warning(' '.join(map(unicode_str, msg)), **kwargs)
+ self._log(logging.WARN, *msg, **kwargs)
def error(self, *msg, **kwargs):
- self.logger.error(' '.join(map(unicode_str, msg)), **kwargs)
+ self._log(logging.ERROR, *msg, **kwargs)
- def alert(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
+ def alert(self, *msg, **kwargs):
+ self._log(logging.CRITICAL, *msg, **kwargs)
@on_try_except_finally(on_finally=(exit, 1))
def fatal(self, *msg, **kwargs):
- self.logger.critical(' '.join(map(unicode_str, msg)), **kwargs)
+ self._log(logging.CRITICAL, *msg, **kwargs)
+
+ def mute(self):
+ self._muted = True
+
+ def unmute(self):
+ self._muted = False
class PythonDLogger(object):
- def __init__(self, logger_name=PYTHON_D_LOG_NAME, log_fmt=PYTHON_D_LOG_LINE_FORMAT):
- """
- :param logger_name: <str>
- :param log_fmt: <str>
- """
+ def __init__(
+ self,
+ logger_name=PYTHON_D_LOG_NAME,
+ log_fmt=PYTHON_D_LOG_LINE_FORMAT,
+ ):
self.logger = BaseLogger(logger_name, log_fmt=log_fmt)
self.module_name = 'plugin'
self.job_name = 'main'
- self._logger_counters = LoggerCounters()
_LOG_TRACEBACK = False
@@ -162,45 +160,39 @@ class PythonDLogger(object):
PythonDLogger._LOG_TRACEBACK = value
def debug(self, *msg):
- self.logger.debug(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
+ self.logger.debug(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
def info(self, *msg):
- self.logger.info(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
+ self.logger.info(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
def warning(self, *msg):
- self.logger.warning(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
+ self.logger.warning(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
@add_traceback
def error(self, *msg):
- self.logger.error(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
+ self.logger.error(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
@add_traceback
def alert(self, *msg):
- self.logger.alert(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
+ self.logger.alert(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
def fatal(self, *msg):
- self.logger.fatal(*msg, extra={'module_name': self.module_name,
- 'job_name': self.job_name or self.module_name})
-
-
-class PythonDLimitedLogger(PythonDLogger):
- @limiter()
- def info(self, *msg):
- PythonDLogger.info(self, *msg)
-
- @limiter()
- def warning(self, *msg):
- PythonDLogger.warning(self, *msg)
-
- @limiter()
- def error(self, *msg):
- PythonDLogger.error(self, *msg)
-
- @limiter()
- def alert(self, *msg):
- PythonDLogger.alert(self, *msg)
+ self.logger.fatal(*msg, extra={
+ 'module_name': self.module_name,
+ 'job_name': self.job_name or self.module_name,
+ })
diff --git a/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md b/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
index c0b2cfbfd..ab51c0514 100644
--- a/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
+++ b/collectors/python.d.plugin/rethinkdbs/integrations/rethinkdb.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "RethinkDB"
learn_status: "Published"
learn_rel_path: "Data Collection/Databases"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -130,16 +131,16 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| host | Hostname or ip of the RethinkDB server. | localhost | False |
-| port | Port to connect to the RethinkDB server. | 28015 | False |
-| user | The username to use to connect to the RethinkDB server. | admin | False |
-| password | The password to use to connect to the RethinkDB server. | | False |
-| timeout | Set a connect timeout to the RethinkDB server. | 2 | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | Hostname or ip of the RethinkDB server. | localhost | no |
+| port | Port to connect to the RethinkDB server. | 28015 | no |
+| user | The username to use to connect to the RethinkDB server. | admin | no |
+| password | The password to use to connect to the RethinkDB server. | | no |
+| timeout | Set a connect timeout to the RethinkDB server. | 2 | no |
</details>
diff --git a/collectors/python.d.plugin/retroshare/integrations/retroshare.md b/collectors/python.d.plugin/retroshare/integrations/retroshare.md
index 753a218c1..4fc003c6f 100644
--- a/collectors/python.d.plugin/retroshare/integrations/retroshare.md
+++ b/collectors/python.d.plugin/retroshare/integrations/retroshare.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "RetroShare"
learn_status: "Published"
learn_rel_path: "Data Collection/Media Services"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -120,12 +121,12 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| url | The URL to the RetroShare Web UI. | http://localhost:9090 | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| url | The URL to the RetroShare Web UI. | http://localhost:9090 | no |
</details>
diff --git a/collectors/python.d.plugin/riakkv/integrations/riakkv.md b/collectors/python.d.plugin/riakkv/integrations/riakkv.md
index f83def446..2e8279bc3 100644
--- a/collectors/python.d.plugin/riakkv/integrations/riakkv.md
+++ b/collectors/python.d.plugin/riakkv/integrations/riakkv.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "RiakKV"
learn_status: "Published"
learn_rel_path: "Data Collection/Databases"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -150,11 +151,11 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| url | The url of the server | no | True |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| url | The url of the server | no | yes |
</details>
diff --git a/collectors/python.d.plugin/samba/integrations/samba.md b/collectors/python.d.plugin/samba/integrations/samba.md
index 5638c6d94..1bd1664ee 100644
--- a/collectors/python.d.plugin/samba/integrations/samba.md
+++ b/collectors/python.d.plugin/samba/integrations/samba.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Samba"
learn_status: "Published"
learn_rel_path: "Data Collection/Storage, Mount Points and Filesystems"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -166,10 +167,10 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
</details>
diff --git a/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md b/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md
index c807d6b3e..e426c8c83 100644
--- a/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md
+++ b/collectors/python.d.plugin/sensors/integrations/linux_sensors_lm-sensors.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Linux Sensors (lm-sensors)"
learn_status: "Published"
learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -122,11 +123,11 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | True |
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
+| types | The types of sensors to collect. | temperature, fan, voltage, current, power, energy, humidity | yes |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
</details>
diff --git a/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md b/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md
index a943f8704..5c5b569e9 100644
--- a/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md
+++ b/collectors/python.d.plugin/smartd_log/integrations/s.m.a.r.t..md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "S.M.A.R.T."
learn_status: "Published"
learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -168,14 +169,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| log_path | path to smartd log files. | /var/log/smartd | True |
-| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | False |
-| age | Time in minutes since the last dump to file. | 30 | False |
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| log_path | path to smartd log files. | /var/log/smartd | yes |
+| exclude_disks | Space-separated patterns. If the pattern is in the drive name, the module will not collect data for it. | | no |
+| age | Time in minutes since the last dump to file. | 30 | no |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
index dc4e95dec..a896164df 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.chart.py
@@ -39,6 +39,7 @@ ATTR171 = '171'
ATTR172 = '172'
ATTR173 = '173'
ATTR174 = '174'
+ATTR177 = '177'
ATTR180 = '180'
ATTR183 = '183'
ATTR190 = '190'
@@ -50,6 +51,8 @@ ATTR199 = '199'
ATTR202 = '202'
ATTR206 = '206'
ATTR233 = '233'
+ATTR241 = '241'
+ATTR242 = '242'
ATTR249 = '249'
ATTR_READ_ERR_COR = 'read-total-err-corrected'
ATTR_READ_ERR_UNC = 'read-total-unc-errors'
@@ -114,6 +117,8 @@ ORDER = [
'offline_uncorrectable_sector_count',
'percent_lifetime_used',
'media_wearout_indicator',
+ 'total_lbas_written',
+ 'total_lbas_read',
]
CHARTS = {
@@ -329,7 +334,7 @@ CHARTS = {
'media_wearout_indicator': {
'options': [None, 'Media Wearout Indicator', 'percentage', 'wear', 'smartd_log.media_wearout_indicator', 'line'],
'lines': [],
- 'attrs': [ATTR233],
+ 'attrs': [ATTR233, ATTR177],
'algo': ABSOLUTE,
},
'nand_writes_1gib': {
@@ -338,6 +343,18 @@ CHARTS = {
'attrs': [ATTR249],
'algo': ABSOLUTE,
},
+ 'total_lbas_written': {
+ 'options': [None, 'Total LBAs Written', 'sectors', 'wear', 'smartd_log.total_lbas_written', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR241],
+ 'algo': ABSOLUTE,
+ },
+ 'total_lbas_read': {
+ 'options': [None, 'Total LBAs Read', 'sectors', 'wear', 'smartd_log.total_lbas_read', 'line'],
+ 'lines': [],
+ 'attrs': [ATTR242],
+ 'algo': ABSOLUTE,
+ },
}
# NOTE: 'parse_temp' decodes ATA 194 raw value. Not heavily tested. Written by @Ferroin
@@ -519,6 +536,7 @@ def ata_attribute_factory(value):
elif name in [
ATTR1,
ATTR7,
+ ATTR177,
ATTR202,
ATTR206,
ATTR233,
diff --git a/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md b/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
index af330bdd1..55ec8fa22 100644
--- a/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
+++ b/collectors/python.d.plugin/spigotmc/integrations/spigotmc.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "SpigotMC"
learn_status: "Published"
learn_rel_path: "Data Collection/Gaming"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -121,14 +122,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| host | The host's IP to connect to. | localhost | True |
-| port | The port the remote console is listening on. | 25575 | True |
-| password | Remote console password if any. | | False |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| host | The host's IP to connect to. | localhost | yes |
+| port | The port the remote console is listening on. | 25575 | yes |
+| password | Remote console password if any. | | no |
</details>
diff --git a/collectors/python.d.plugin/squid/integrations/squid.md b/collectors/python.d.plugin/squid/integrations/squid.md
index 484d8706c..6599826da 100644
--- a/collectors/python.d.plugin/squid/integrations/squid.md
+++ b/collectors/python.d.plugin/squid/integrations/squid.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Squid"
learn_status: "Published"
learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -117,14 +118,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 1 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | False |
-| host | The host to connect to. | | True |
-| port | The port to connect to. | | True |
-| request | The URL to request from Squid. | | True |
+| update_every | Sets the default data collection frequency. | 1 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | local | no |
+| host | The host to connect to. | | yes |
+| port | The port to connect to. | | yes |
+| request | The URL to request from Squid. | | yes |
</details>
diff --git a/collectors/python.d.plugin/tomcat/integrations/tomcat.md b/collectors/python.d.plugin/tomcat/integrations/tomcat.md
index 8210835c1..883f29dd3 100644
--- a/collectors/python.d.plugin/tomcat/integrations/tomcat.md
+++ b/collectors/python.d.plugin/tomcat/integrations/tomcat.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Tomcat"
learn_status: "Published"
learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -120,14 +121,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | True |
-| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected | no | False |
-| pass | A valid password for the user in question. Required if the endpoint is password protected | no | False |
-| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g ajp-bio-8009 | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| url | The URL of the Tomcat server's status endpoint. Always add the suffix ?XML=true. | no | yes |
+| user | A valid user with read permission to access the /manager/status endpoint of the server. Required if the endpoint is password protected | no | no |
+| pass | A valid password for the user in question. Required if the endpoint is password protected | no | no |
+| connector_name | The connector component that communicates with a web connector via the AJP protocol, e.g ajp-bio-8009 | | no |
</details>
diff --git a/collectors/python.d.plugin/tor/integrations/tor.md b/collectors/python.d.plugin/tor/integrations/tor.md
index f5c0026af..0e57fa793 100644
--- a/collectors/python.d.plugin/tor/integrations/tor.md
+++ b/collectors/python.d.plugin/tor/integrations/tor.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Tor"
learn_status: "Published"
learn_rel_path: "Data Collection/VPNs"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -122,14 +123,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| control_addr | Tor control IP address | 127.0.0.1 | False |
-| control_port | Tor control port. Can be either a tcp port, or a path to a socket file. | 9051 | False |
-| password | Tor control password | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| control_addr | Tor control IP address | 127.0.0.1 | no |
+| control_port | Tor control port. Can be either a tcp port, or a path to a socket file. | 9051 | no |
+| password | Tor control password | | no |
</details>
diff --git a/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md b/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
index 309265789..af58608bd 100644
--- a/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
+++ b/collectors/python.d.plugin/uwsgi/integrations/uwsgi.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "uWSGI"
learn_status: "Published"
learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -122,14 +123,14 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | False |
-| socket | The 'path/to/uwsgistats.sock' | no | False |
-| host | The host to connect to | no | False |
-| port | The port to connect to | no | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | The JOB's name as it will appear at the dashboard (by default is the job_name) | job_name | no |
+| socket | The 'path/to/uwsgistats.sock' | no | no |
+| host | The host to connect to | no | no |
+| port | The port to connect to | no | no |
</details>
diff --git a/collectors/python.d.plugin/varnish/integrations/varnish.md b/collectors/python.d.plugin/varnish/integrations/varnish.md
index 142875f4b..da74dcf8f 100644
--- a/collectors/python.d.plugin/varnish/integrations/varnish.md
+++ b/collectors/python.d.plugin/varnish/integrations/varnish.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "Varnish"
learn_status: "Published"
learn_rel_path: "Data Collection/Web Servers and Web Proxies"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -161,12 +162,12 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | True |
-| update_every | Sets the default data collection frequency. | 10 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
+| instance_name | the name of the varnishd instance to get logs from. If not specified, the local host name is used. | | yes |
+| update_every | Sets the default data collection frequency. | 10 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
</details>
diff --git a/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md b/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
index 39987743e..fe3c05ba6 100644
--- a/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
+++ b/collectors/python.d.plugin/w1sensor/integrations/1-wire_sensors.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "1-Wire Sensors"
learn_status: "Published"
learn_rel_path: "Data Collection/Hardware Devices and Sensors"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -114,12 +115,12 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
-| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | False |
-| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | False |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
+| name | Job name. This value will overwrite the `job_name` value. JOBS with the same name are mutually exclusive. Only one of them will be allowed running at any time. This allows autodetection to try several alternatives and pick the one that works. | | no |
+| name_<1-Wire id> | This allows associating a human readable name with a sensor's 1-Wire identifier. | | no |
</details>
diff --git a/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md b/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
index 1ebe865f0..9d7d1c3d5 100644
--- a/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
+++ b/collectors/python.d.plugin/zscores/integrations/python.d_zscores.md
@@ -4,6 +4,7 @@ meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/python.d.p
sidebar_label: "python.d zscores"
learn_status: "Published"
learn_rel_path: "Data Collection/Other"
+most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -123,20 +124,20 @@ Every configuration JOB starts with a `job_name` value which will appear in the
| Name | Description | Default | Required |
|:----|:-----------|:-------|:--------:|
-| charts_regex | what charts to pull data for - A regex like `system\..*/` or `system\..*/apps.cpu/apps.mem` etc. | system\..* | True |
-| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | True |
-| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | True |
-| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | True |
-| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | True |
-| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | True |
-| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | True |
-| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | True |
-| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | True |
-| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | True |
-| update_every | Sets the default data collection frequency. | 5 | False |
-| priority | Controls the order of charts at the netdata dashboard. | 60000 | False |
-| autodetection_retry | Sets the job re-check interval in seconds. | 0 | False |
-| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | False |
+| charts_regex | what charts to pull data for - A regex like `system\..*/` or `system\..*/apps.cpu/apps.mem` etc. | system\..* | yes |
+| train_secs | length of time (in seconds) to base calculations off for mean and stddev. | 14400 | yes |
+| offset_secs | offset (in seconds) preceding latest data to ignore when calculating mean and stddev. | 300 | yes |
+| train_every_n | recalculate the mean and stddev every n steps of the collector. | 900 | yes |
+| z_smooth_n | smooth the z score (to reduce sensitivity to spikes) by averaging it over last n values. | 15 | yes |
+| z_clip | cap absolute value of zscore (before smoothing) for better stability. | 10 | yes |
+| z_abs | set z_abs: 'true' to make all zscores be absolute values only. | true | yes |
+| burn_in | burn in period in which to initially calculate mean and stddev on every step. | 2 | yes |
+| mode | mode can be to get a zscore 'per_dim' or 'per_chart'. | per_chart | yes |
+| per_chart_agg | per_chart_agg is how you aggregate from dimension to chart when mode='per_chart'. | mean | yes |
+| update_every | Sets the default data collection frequency. | 5 | no |
+| priority | Controls the order of charts at the netdata dashboard. | 60000 | no |
+| autodetection_retry | Sets the job re-check interval in seconds. | 0 | no |
+| penalty | Indicates whether to apply penalty to update_every in case of failures. | yes | no |
</details>