Diffstat:
 conf.d/python.d.conf                |   9
 conf.d/python.d/dns_query_time.conf |  72
 conf.d/python.d/elasticsearch.conf  |  17
 conf.d/python.d/fail2ban.conf       |  11
 conf.d/python.d/go_expvar.conf      | 106
 conf.d/python.d/isc_dhcpd.conf      |  11
 conf.d/python.d/postgres.conf       |   1
 conf.d/python.d/rabbitmq.conf       |  75
 conf.d/python.d/samba.conf          |  58
 conf.d/python.d/smartd_log.conf     |   8
 conf.d/python.d/web_log.conf        |  48
 11 files changed, 383 insertions(+), 33 deletions(-)
diff --git a/conf.d/python.d.conf b/conf.d/python.d.conf
index 9ed346cdc..0a37e40ae 100644
--- a/conf.d/python.d.conf
+++ b/conf.d/python.d.conf
@@ -26,11 +26,13 @@ log_interval: 3600
# If "default_run" = "no" the default for all modules is disabled (no).
# Setting any of these to "yes" will enable it.
-# apache_cache: yes
+# apache_cache has been replaced by web_log
+apache_cache: no
# apache: yes
# bind_rndc: yes
# cpufreq: yes
# cpuidle: yes
+# dns_query_time: yes
# dovecot: yes
# elasticsearch: yes
@@ -43,7 +45,7 @@ example: no
# gunicorn_log has been replaced by web_log
gunicorn_log: no
-
+go_expvar: no
# haproxy: yes
# hddtemp: yes
# ipfs: yes
@@ -52,6 +54,7 @@ gunicorn_log: no
# memcached: yes
# mysql: yes
# nginx: yes
+# nsd: yes
# nginx_log has been replaced by web_log
nginx_log: no
@@ -60,9 +63,11 @@ nginx_log: no
# phpfpm: yes
# postfix: yes
# postgres: yes
+# rabbitmq: yes
# redis: yes
# retroshare: yes
# sensors: yes
+# samba: yes
# smartd_log: yes
# squid: yes
# tomcat: yes
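
Each line in python.d.conf toggles one collector, so enabling a module added by this change is a one-line edit. A minimal sketch of an override, assuming the stock defaults above (module names as documented in this file):

  rabbitmq: yes        # poll the RabbitMQ management API
  dns_query_time: yes  # chart DNS query round-trip times
  go_expvar: yes       # only useful once a job is defined in go_expvar.conf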
diff --git a/conf.d/python.d/dns_query_time.conf b/conf.d/python.d/dns_query_time.conf
new file mode 100644
index 000000000..f4d4dbf92
--- /dev/null
+++ b/conf.d/python.d/dns_query_time.conf
@@ -0,0 +1,72 @@
+# netdata python.d.plugin configuration for dns_query_time
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, dns_query_time also supports the following:
+#
+# dns_servers: 'dns servers' # list of dns servers to query
+# domains: 'domains' # list of domains
+# aggregate: yes/no # Default: yes. Aggregate all servers in one chart or not
+# response_timeout: 4 # Default: 4. DNS query response timeout in seconds (the query value is recorded as -100 when the response time exceeds response_timeout)
+#
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+#
+#aggregate: yes
+#dns_servers: '8.8.8.8 8.8.4.4'
+#domains: 'python.org distrowatch.com linuxmint.com linux.com rsyslog.com liblognorm.com archlinux.org cisco.com debian.org kernel.org gns3.com opera.com github.com youtube.com amazon.co.uk kde.org netdata.firehol.org ubuntu.com redhat.com opensuse.org wireshark.org vmware.com microsoft.com elastic.co'
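
The commented defaults above put all servers on a single aggregated chart. A sketch of a job that charts each server separately, using only the options documented in this file (server and domain lists are placeholders):

  separate:
    name: 'dns_servers'
    aggregate: no                       # one chart per DNS server
    response_timeout: 4                 # seconds; slower answers are recorded as -100
    dns_servers: '127.0.0.1 8.8.8.8'
    domains: 'example.com example.org'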
diff --git a/conf.d/python.d/elasticsearch.conf b/conf.d/python.d/elasticsearch.conf
index f98aaeced..7c35df229 100644
--- a/conf.d/python.d/elasticsearch.conf
+++ b/conf.d/python.d/elasticsearch.conf
@@ -61,19 +61,16 @@
# cluster_health: False/True # Calls to cluster health elasticsearch API. Enabled by default.
# cluster_stats: False/True # Calls to cluster stats elasticsearch API. Enabled by default.
#
-# ----------------------------------------------------------------------
-# IMPORTANT Information
-#
-# Module uses python `requests` package
#
-# You need to install it manually. (python-requests or python3-requests depending on the version of python).
+# if the URL is password protected, the following are supported:
#
+# user: 'username'
+# pass: 'password'
#
+# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
# only one of them will run (they have the same name)
#
-#local:
-# host: '127.0.0.1'
-# port: '9200'
-# cluster_health: True
-# cluster_stats: True
+local:
+ host: '127.0.0.1'
+ port: '9200'
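
The newly documented user/pass options cover clusters behind HTTP basic auth. A sketch of such a job (credentials are placeholders for a hypothetical read-only monitoring account):

  secured:
    name: 'local'
    host: '127.0.0.1'
    port: '9200'
    user: 'netdata'
    pass: 'changeme'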
diff --git a/conf.d/python.d/fail2ban.conf b/conf.d/python.d/fail2ban.conf
index d9664e353..76277108b 100644
--- a/conf.d/python.d/fail2ban.conf
+++ b/conf.d/python.d/fail2ban.conf
@@ -58,15 +58,6 @@
#
# log_path: 'path to fail2ban.log' # Default: '/var/log/fail2ban.log'
# conf_path: 'path to jail.local/jail.conf' # Default: '/etc/fail2ban/jail.local'
-# conf_dir: 'path to jail.d/' # Default: '' empty
+# conf_dir: 'path to jail.d/' # Default: '/etc/fail2ban/jail.d/'
# exclude: 'jails you want to exclude from autodetection' # Default: '[]' empty list
#------------------------------------------------------------------------------------------------------------------
-# ------------------------------------------------------------------------------------------------------------------
-# AUTO-DETECTION JOBS
-# only one of them will run (they have the same name)
-
-local:
- log_path: '/var/log/fail2ban.log'
- conf_path: '/etc/fail2ban/jail.local'
-# conf_dir: '/etc/fail2ban/jail.d/'
-# exclude: 'dropbear apache'
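
With conf_dir now defaulting to /etc/fail2ban/jail.d/, the explicit local job removed above becomes redundant; a job is only needed for a non-standard layout. A sketch with hypothetical paths:

  custom:
    log_path: '/opt/fail2ban/fail2ban.log'
    conf_path: '/opt/fail2ban/jail.local'
    exclude: 'dropbear apache'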
diff --git a/conf.d/python.d/go_expvar.conf b/conf.d/python.d/go_expvar.conf
new file mode 100644
index 000000000..5be4890dc
--- /dev/null
+++ b/conf.d/python.d/go_expvar.conf
@@ -0,0 +1,106 @@
+# netdata python.d.plugin configuration for go_expvar
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: my_name # the JOB's name as it will appear at the
+# # dashboard. If name: is not supplied the
+# # job_name: will be used (use _ for spaces)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, this module also supports the following:
+#
+# url: 'http://127.0.0.1/debug/vars' # the URL of the expvar endpoint
+# ss_cert: # ignore HTTPS self-signed certificate
+# proxy: # use HTTP proxy
+#
+# As the plugin cannot possibly know the port your application listens on, there is no default value. Please include
+# the whole path of the endpoint, as the expvar handler can be installed in a non-standard location.
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# collect_memstats: true # enables charts for Go runtime's memory statistics
+# extra_charts: {} # defines extra data/charts to monitor, please see the example below
+#
+# If collect_memstats is disabled and no extra charts are defined, this module will disable itself, as it has no data to
+# collect.
+#
+# Please visit the module wiki page for more information on how to use the extra_charts variable:
+#
+# https://github.com/firehol/netdata/wiki/Monitoring-Go-Applications#monitoring-custom-vars-with-go_expvar
+#
+# Configuration example
+# ---------------------
+
+#app1:
+# name : 'app1'
+# url : 'http://127.0.0.1:8080/debug/vars'
+# collect_memstats: true
+# extra_charts:
+# - id: "runtime_goroutines"
+# options:
+# name: num_goroutines
+# title: "runtime: number of goroutines"
+# units: goroutines
+# family: runtime
+# context: expvar.runtime.goroutines
+# chart_type: line
+# lines:
+# - {expvar_key: 'runtime.goroutines', expvar_type: int, id: runtime_goroutines}
+# - id: "foo_counters"
+# options:
+# name: counters
+# title: "some random counters"
+# units: awesomeness
+# family: counters
+# context: expvar.foo.counters
+# chart_type: line
+# lines:
+# - {expvar_key: 'counters.cnt1', expvar_type: int, id: counters_cnt1}
+# - {expvar_key: 'counters.cnt2', expvar_type: float, id: counters_cnt2}
+
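The commented app1 job above exercises the full extra_charts machinery; a job that only wants the Go runtime memory charts can be much smaller. A sketch (URL and names are placeholders; the expvar handler must be mounted at that path by your application):

  memstats_only:
    name: 'my_app'
    url: 'http://127.0.0.1:8080/debug/vars'
    collect_memstats: true
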
diff --git a/conf.d/python.d/isc_dhcpd.conf b/conf.d/python.d/isc_dhcpd.conf
index 7c8fe3ceb..938ca6e72 100644
--- a/conf.d/python.d/isc_dhcpd.conf
+++ b/conf.d/python.d/isc_dhcpd.conf
@@ -56,8 +56,11 @@
#
# In addition to the above, isc_dhcpd supports the following:
#
-# leases_path: 'PATH' # the path to dhcpd.leases file
-# pools: 'dhcpd pools list' # Pools in CIDR format
+# leases_path: 'PATH' # the path to dhcpd.leases file
+# pools:
+# office: '192.168.2.0/24' # name(dimension): pool in CIDR format
+# wifi: '192.168.3.0/24' # name(dimension): pool in CIDR format
+# 192.168.4.0/24: '192.168.4.0/24' # name(dimension): pool in CIDR format
#
#-----------------------------------------------------------------------
# IMPORTANT notes
@@ -75,4 +78,6 @@
#
#leases:
# leases_path : '/var/lib/dhcp/dhcpd.leases'
-# pools : '192.168.3.0/24 192.168.4.0/24 192.168.5.0/24'
+# pools:
+# office: '192.168.2.0/24'
+# wifi: '192.168.3.0/24'
diff --git a/conf.d/python.d/postgres.conf b/conf.d/python.d/postgres.conf
index d4d2bafcc..12dddae67 100644
--- a/conf.d/python.d/postgres.conf
+++ b/conf.d/python.d/postgres.conf
@@ -68,6 +68,7 @@
#
# table_stats : false
# index_stats : false
+# database_poll : 'dbase_name1 dbase_name2' # poll only the specified databases (all others will be excluded from charts)
#
# PostgreSQL permissions are configured in its pg_hba.conf file. You can
# "trust" local clients to allow netdata to connect, or you can create
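
A sketch of a job combining the new database_poll option with the existing stats toggles (database names are placeholders; default connection settings are assumed to suffice):

  local:
    name: 'local'
    database_poll: 'mydb1 mydb2'   # only these two databases get charted
    table_stats: false
    index_stats: false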
diff --git a/conf.d/python.d/rabbitmq.conf b/conf.d/python.d/rabbitmq.conf
new file mode 100644
index 000000000..eccf65df9
--- /dev/null
+++ b/conf.d/python.d/rabbitmq.conf
@@ -0,0 +1,75 @@
+# netdata python.d.plugin configuration for rabbitmq
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+# In addition to the above, the rabbitmq module also supports the following:
+#
+# host: 'ipaddress' # Server ip address or hostname. Default: 127.0.0.1
+# port: 'port' # RabbitMQ management API port. Default: 15672
+# scheme: 'scheme' # http or https. Default: http
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+#
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+#
+local:
+ host: '127.0.0.1'
+ user: 'guest'
+ pass: 'guest'
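
The local job above targets the management plugin's default endpoint over plain HTTP. A sketch of a job for a remote, TLS-fronted management API, using only the options documented in this file (host and credentials are placeholders):

  remote:
    name: 'remote'
    host: 'rabbit.example.com'
    port: '15672'
    scheme: 'https'
    user: 'monitor'
    pass: 'changeme'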
diff --git a/conf.d/python.d/samba.conf b/conf.d/python.d/samba.conf
new file mode 100644
index 000000000..865281cd6
--- /dev/null
+++ b/conf.d/python.d/samba.conf
@@ -0,0 +1,58 @@
+# netdata python.d.plugin configuration for samba
+#
+# This file is in YaML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+update_every: 5
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 5
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed to run at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# retries: 5 # the JOB's number of restoration attempts
+#
+#
+
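The samba module defines no job-level options beyond the common ones; it gathers its data by running smbstatus, which normally requires elevated privileges. A hedged sketch of the usual sudoers grant for the netdata user (the binary path varies by distribution):

  netdata ALL=(root) NOPASSWD: /usr/bin/smbstatus
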
diff --git a/conf.d/python.d/smartd_log.conf b/conf.d/python.d/smartd_log.conf
index e16454dfb..8764ffd3e 100644
--- a/conf.d/python.d/smartd_log.conf
+++ b/conf.d/python.d/smartd_log.conf
@@ -58,11 +58,11 @@
#
# log_path: '/path/to/smartdlogs' # path to smartd log files. Default is /var/log/smartd
# raw_values: no # raw or normalized values on charts. Default is normalized.
-# smart_attributes: '1 2 3 4 44' # add additional smart attributes charts. Default are ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'].
+# smart_attributes: '1 2 3 4 44' # SMART attributes to chart. Default is ['1', '4', '5', '7', '9', '12', '193', '194', '197', '198', '200'].
#
# ----------------------------------------------------------------------
# Additional information
-# Plugin reads smartd log files (-A option).
+# Plugin reads smartd log files (-A option).
# You need to add '-i 600 -A /var/log/smartd/' to /etc/default/smartmontools (see man smartd) to pass additional options to smartd on startup
# Then restart the smartd service and check the configured log directory
# ls /var/log/smartd/
@@ -74,12 +74,12 @@
# RAW vs NORMALIZED values
# "Normalized value", commonly referred to as just "value". This is the most universal measurement, on a scale from 0 (bad) to some maximum (good) value.
# Maximum values are typically 100, 200 or 253. Rule of thumb is: high values are good, low values are bad.
-#
+#
# "Raw value" - the value of the attribute as it is tracked by the device, before any normalization takes place.
# Some raw numbers provide valuable insight when properly interpreted. These cases will be discussed later on.
# Raw values are typically listed in hexadecimal numbers. The raw value has different structure for different vendors and is often not meaningful as a decimal number.
#
#
# JOB configuration
-#
+#
log_path: '/var/log/smartd'
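
Building on the log_path set above, a sketch of a job that charts raw values for a custom attribute set (the attribute IDs here are illustrative):

  raw:
    name: 'smartd'
    log_path: '/var/log/smartd'
    raw_values: yes
    smart_attributes: '1 5 9 194 197'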
diff --git a/conf.d/python.d/web_log.conf b/conf.d/python.d/web_log.conf
index 06656285f..e51b565d6 100644
--- a/conf.d/python.d/web_log.conf
+++ b/conf.d/python.d/web_log.conf
@@ -60,13 +60,21 @@
# Additionally to the above, web_log also supports the following:
#
# path: 'PATH' # the path to web server log file
-# detailed_response_codes: yes/no # Default: yes. Additional chart where response codes are not grouped
-# detailed_response_aggregate: yes/no # Default: yes. Not aggregated detailed response codes charts
-# all_time : yes/no # Default: yes. All time unique client IPs chart (50000 addresses ~ 400KB)
+# path: 'PATH[0-9]*[0-9]' # log files with date suffix are also supported
+# detailed_response_codes: yes/no # default: yes. Additional chart where response codes are not grouped
+# detailed_response_aggregate: yes/no # default: yes. Not aggregated detailed response codes charts
+# all_time : yes/no # default: yes. All time unique client IPs chart (50000 addresses ~ 400KB)
+# filter: # filter with regex
+# include: 'REGEX' # only those rows that match the regex
+# exclude: 'REGEX' # all rows except those that match the regex
# categories: # requests per url chart configuration
# cacti: 'cacti.*' # name(dimension): REGEX to match
# observium: 'observium.*' # name(dimension): REGEX to match
# stub_status: 'stub_status' # name(dimension): REGEX to match
+# user_defined: # requests per pattern in <user_defined> field (custom_log_format)
+# cacti: 'cacti.*' # name(dimension): REGEX to match
+# observium: 'observium.*' # name(dimension): REGEX to match
+# stub_status: 'stub_status' # name(dimension): REGEX to match
# custom_log_format: # define a custom log format
# pattern: '(?P<address>[\da-f.:]+) -.*?"(?P<method>[A-Z]+) (?P<url>.*?)" (?P<code>[1-9]\d{2}) (?P<bytes_sent>\d+) (?P<resp_length>\d+) (?P<resp_time>\d\.\d+) '
# time_multiplier: 1000000 # type <int> - convert time to microseconds
@@ -83,7 +91,7 @@
# nginx:
# log_format netdata '$remote_addr - $remote_user [$time_local] '
# '"$request" $status $body_bytes_sent '
-# '$request_length $request_time '
+# '$request_length $request_time $upstream_response_time '
# '"$http_referer" "$http_user_agent"';
# access_log /var/log/nginx/access.log netdata;
#
@@ -145,3 +153,35 @@ gunicorn_log:
gunicorn_log2:
name: 'gunicorn'
path: '/var/log/gunicorn/gunicorn-access.log'
+
+# -------------------------------------------
+# Apache Cache
+apache_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache/cache.log'
+
+apache2_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/apache2/cache.log'
+
+httpd_cache:
+ name: 'apache_cache'
+ type: 'apache_cache'
+ path: '/var/log/httpd/cache.log'
+
+# -------------------------------------------
+# Squid
+
+# debian/ubuntu
+squid_log1:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid3/access.log'
+
+#gentoo
+squid_log2:
+ name: 'squid'
+ type: 'squid'
+ path: '/var/log/squid/access.log'
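
To round out the new options, a sketch of a job combining filter with user-defined categories (path and patterns are placeholders):

  nginx_filtered:
    name: 'nginx'
    path: '/var/log/nginx/access.log'
    filter:
      include: 'GET .*'               # count only GET requests
      exclude: '.*\.(css|js|png)'     # skip static assets
    categories:
      api: '^/api/.*'                 # name(dimension): REGEX to match
      downloads: '^/files/.*'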