summaryrefslogtreecommitdiffstats
path: root/collectors/python.d.plugin/nginx_plus
diff options
context:
space:
mode:
Diffstat (limited to 'collectors/python.d.plugin/nginx_plus')
-rw-r--r--collectors/python.d.plugin/nginx_plus/Makefile.inc13
-rw-r--r--collectors/python.d.plugin/nginx_plus/README.md165
-rw-r--r--collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py487
-rw-r--r--collectors/python.d.plugin/nginx_plus/nginx_plus.conf85
4 files changed, 750 insertions, 0 deletions
diff --git a/collectors/python.d.plugin/nginx_plus/Makefile.inc b/collectors/python.d.plugin/nginx_plus/Makefile.inc
new file mode 100644
index 0000000..d3fdeaf
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += nginx_plus/nginx_plus.chart.py
+dist_pythonconfig_DATA += nginx_plus/nginx_plus.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += nginx_plus/README.md nginx_plus/Makefile.inc
+
diff --git a/collectors/python.d.plugin/nginx_plus/README.md b/collectors/python.d.plugin/nginx_plus/README.md
new file mode 100644
index 0000000..2580740
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/README.md
@@ -0,0 +1,165 @@
+<!--
+title: "NGINX Plus monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/nginx_plus/README.md
+sidebar_label: "NGINX Plus"
+-->
+
+# NGINX Plus monitoring with Netdata
+
+Monitors one or more NGINX Plus servers depending on configuration. Servers can be either local or remote.
+
+Example nginx_plus configuration can be found in 'python.d/nginx_plus.conf'
+
+It produces the following charts:
+
+1. **Requests total** in requests/s
+
+ - total
+
+2. **Requests current** in requests
+
+ - current
+
+3. **Connection Statistics** in connections/s
+
+ - accepted
+ - dropped
+
+4. **Workers Statistics** in workers
+
+ - idle
+ - active
+
+5. **SSL Handshakes** in handshakes/s
+
+ - successful
+ - failed
+
+6. **SSL Session Reuses** in sessions/s
+
+ - reused
+
+7. **SSL Memory Usage** in percent
+
+ - usage
+
+8. **Processes** in processes
+
+ - respawned
+
+For every server zone:
+
+1. **Processing** in requests
+
+ - processing
+
+2. **Requests** in requests/s
+
+ - requests
+
+3. **Responses** in requests/s
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+
+4. **Traffic** in kilobits/s
+
+ - received
+ - sent
+
+For every upstream:
+
+1. **Peers Requests** in requests/s
+
+ - peer name (dimension per peer)
+
+2. **All Peers Responses** in responses/s
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+
+3. **Peer Responses** in requests/s (for every peer)
+
+ - 1xx
+ - 2xx
+ - 3xx
+ - 4xx
+ - 5xx
+
+4. **Peers Connections** in active
+
+ - peer name (dimension per peer)
+
+5. **Peers Connections Usage** in percent
+
+ - peer name (dimension per peer)
+
+6. **All Peers Traffic** in KB
+
+ - received
+ - sent
+
+7. **Peer Traffic** in KB/s (for every peer)
+
+ - received
+ - sent
+
+8. **Peer Timings** in ms (for every peer)
+
+ - header
+ - response
+
+9. **Memory Usage** in percent
+
+ - usage
+
+10. **Peers Status** in state
+
+ - peer name (dimension per peer)
+
+11. **Peers Total Downtime** in seconds
+
+ - peer name (dimension per peer)
+
+For every cache:
+
+1. **Traffic** in KB
+
+ - served
+ - written
+ - bypass
+
+2. **Memory Usage** in percent
+
+ - usage
+
+## Configuration
+
+Edit the `python.d/nginx_plus.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/nginx_plus.conf
+```
+
+The module needs only the `url` of the server's `status` page.
+
+Here is an example for a local server:
+
+```yaml
+local:
+ url : 'http://localhost/status'
+```
+
+Without configuration, the module will fail to start.
+
+---
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Fpython.d.plugin%2Fnginx_plus%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
new file mode 100644
index 0000000..a6c035f
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.chart.py
@@ -0,0 +1,487 @@
+# -*- coding: utf-8 -*-
+# Description: nginx_plus netdata python.d module
+# Author: Ilya Mashchenko (ilyam8)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from collections import defaultdict
+from copy import deepcopy
+from json import loads
+
+try:
+ from collections import OrderedDict
+except ImportError:
+ from third_party.ordereddict import OrderedDict
+
+from bases.FrameworkServices.UrlService import UrlService
+
# Dashboard ordering of the server-wide charts.  Per-object charts (server
# zones, upstreams, caches) are appended to a copy of this list at runtime by
# Service.check().
ORDER = [
    'requests_total',
    'requests_current',
    'connections_statistics',
    'connections_workers',
    'ssl_handshakes',
    'ssl_session_reuses',
    'ssl_memory_usage',
    'processes'
]

# Static chart definitions for the server-wide metrics.
# Each chart: 'options' = [name override, title, units, family, context, type],
# each dimension line = [metric id, label, algorithm?, multiplier?, divisor?].
# Dimensions that omit the algorithm rely on the framework default
# (presumably 'absolute' — confirm against python.d.plugin chart docs).
CHARTS = {
    'requests_total': {
        'options': [None, 'Requests Total', 'requests/s', 'requests', 'nginx_plus.requests_total', 'line'],
        'lines': [
            ['requests_total', 'total', 'incremental']
        ]
    },
    'requests_current': {
        'options': [None, 'Requests Current', 'requests', 'requests', 'nginx_plus.requests_current', 'line'],
        'lines': [
            ['requests_current', 'current']
        ]
    },
    'connections_statistics': {
        'options': [None, 'Connections Statistics', 'connections/s',
                    'connections', 'nginx_plus.connections_statistics', 'stacked'],
        'lines': [
            ['connections_accepted', 'accepted', 'incremental'],
            ['connections_dropped', 'dropped', 'incremental']
        ]
    },
    'connections_workers': {
        'options': [None, 'Workers Statistics', 'workers',
                    'connections', 'nginx_plus.connections_workers', 'stacked'],
        'lines': [
            ['connections_idle', 'idle'],
            ['connections_active', 'active']
        ]
    },
    'ssl_handshakes': {
        'options': [None, 'SSL Handshakes', 'handshakes/s', 'ssl', 'nginx_plus.ssl_handshakes', 'stacked'],
        'lines': [
            ['ssl_handshakes', 'successful', 'incremental'],
            ['ssl_handshakes_failed', 'failed', 'incremental']
        ]
    },
    'ssl_session_reuses': {
        'options': [None, 'Session Reuses', 'sessions/s', 'ssl', 'nginx_plus.ssl_session_reuses', 'line'],
        'lines': [
            ['ssl_session_reuses', 'reused', 'incremental']
        ]
    },
    'ssl_memory_usage': {
        # value is a percentage scaled by 100 in _get_data(), hence divisor 100
        'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
        'lines': [
            ['ssl_memory_usage', 'usage', 'absolute', 1, 100]
        ]
    },
    'processes': {
        'options': [None, 'Processes', 'processes', 'processes', 'nginx_plus.processes', 'line'],
        'lines': [
            ['processes_respawned', 'respawned']
        ]
    }
}
+
+
def cache_charts(cache):
    """Build the two chart definitions (traffic, memory usage) for one cache.

    *cache* must expose ``name`` (sanitized id used in metric keys) and
    ``real_name`` (original cache name, shown in the chart family).
    """
    name = cache.name
    family = 'cache {0}'.format(cache.real_name)

    result = OrderedDict()
    result[name + '_traffic'] = {
        'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'],
        'lines': [
            [name + '_hit_bytes', 'served', 'absolute', 1, 1024],
            [name + '_miss_bytes_written', 'written', 'absolute', 1, 1024],
            [name + '_miss_bytes', 'bypass', 'absolute', 1, 1024],
        ]
    }
    result[name + '_memory_usage'] = {
        # memory_usage is a percentage pre-scaled by 100 in Cache.get_data()
        'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'],
        'lines': [
            [name + '_memory_usage', 'usage', 'absolute', 1, 100],
        ]
    }
    return result
+
+
def web_zone_charts(wz):
    """Build the chart definitions (processing, requests, responses, traffic)
    for one server zone.

    *wz* must expose ``name`` (sanitized id) and ``real_name`` (original name).
    """
    name = wz.name
    family = 'web zone {name}'.format(name=wz.real_name)
    charts = OrderedDict()

    # Currently processed requests
    charts['zone_%s_processing' % name] = {
        'options': [None, 'Zone "{0}" Processing'.format(name), 'requests', family,
                    'nginx_plus.web_zone_processing', 'line'],
        'lines': [
            [name + '_processing', 'processing']
        ]
    }
    # Request rate
    charts['zone_%s_requests' % name] = {
        'options': [None, 'Zone "{0}" Requests'.format(name), 'requests/s', family,
                    'nginx_plus.web_zone_requests', 'line'],
        'lines': [
            [name + '_requests', 'requests', 'incremental']
        ]
    }
    # Responses by status-code class (stack order matches the other modules)
    charts['zone_%s_responses' % name] = {
        'options': [None, 'Zone "{0}" Responses'.format(name), 'requests/s', family,
                    'nginx_plus.web_zone_responses', 'stacked'],
        'lines': [
            [name + '_responses_' + code, code, 'incremental']
            for code in ('2xx', '5xx', '3xx', '4xx', '1xx')
        ]
    }
    # In/out traffic; 'sent' drawn below the axis via the -1 multiplier
    charts['zone_%s_net' % name] = {
        'options': [None, 'Zone "{0}" Traffic'.format(name), 'kilobits/s', family,
                    'nginx_plus.zone_net', 'area'],
        'lines': [
            [name + '_received', 'received', 'incremental', 1, 1000],
            [name + '_sent', 'sent', 'incremental', -1, 1000]
        ]
    }
    return charts
+
+
def web_upstream_charts(wu):
    """Build all chart definitions for one upstream and every one of its peers.

    *wu* must expose ``name``/``real_name`` and iterate over its peer objects
    (each exposing ``server`` — sanitized — and ``real_server``).
    """
    def per_peer(metric, algorithm='absolute', mul=1, div=1):
        # One dimension per peer; metric ids are '<upstream>_<peer>_<metric>'.
        return [
            ['_'.join([wu.name, peer.server, metric]), peer.real_server, algorithm, mul, div]
            for peer in wu
        ]

    codes = ('2xx', '5xx', '3xx', '4xx', '1xx')
    charts = OrderedDict()
    family = 'web upstream {name}'.format(name=wu.real_name)

    # Request rate per peer
    charts['web_upstream_%s_requests' % wu.name] = {
        'options': [None, 'Peers Requests', 'requests/s', family, 'nginx_plus.web_upstream_requests', 'line'],
        'lines': per_peer('requests', 'incremental')
    }
    # Responses summed over all peers, by status-code class
    charts['web_upstream_%s_all_responses' % wu.name] = {
        'options': [None, 'All Peers Responses', 'responses/s', family,
                    'nginx_plus.web_upstream_all_responses', 'stacked'],
        'lines': [['_'.join([wu.name, 'responses_' + code]), code, 'incremental'] for code in codes]
    }
    # Responses per individual peer
    for peer in wu:
        charts['web_upstream_{0}_{1}_responses'.format(wu.name, peer.server)] = {
            'options': [None, 'Peer "{0}" Responses'.format(peer.real_server), 'responses/s', family,
                        'nginx_plus.web_upstream_peer_responses', 'stacked'],
            'lines': [['_'.join([wu.name, peer.server, 'responses_' + code]), code, 'incremental']
                      for code in codes]
        }
    # Active connections per peer, and usage against max_conns (pre-scaled by 100)
    charts['web_upstream_%s_connections' % wu.name] = {
        'options': [None, 'Peers Connections', 'active', family, 'nginx_plus.web_upstream_connections', 'line'],
        'lines': per_peer('active')
    }
    charts['web_upstream_%s_connections_usage' % wu.name] = {
        'options': [None, 'Peers Connections Usage', 'percentage', family,
                    'nginx_plus.web_upstream_connections_usage', 'line'],
        'lines': per_peer('connections_usage', div=100)
    }
    # Traffic aggregated over all peers ('sent' mirrored below the axis)
    charts['web_upstream_{0}_all_net'.format(wu.name)] = {
        'options': [None, 'All Peers Traffic', 'kilobits/s', family, 'nginx_plus.web_upstream_all_net', 'area'],
        'lines': [
            ['{0}_received'.format(wu.name), 'received', 'incremental', 1, 1000],
            ['{0}_sent'.format(wu.name), 'sent', 'incremental', -1, 1000]
        ]
    }
    # Traffic per individual peer
    for peer in wu:
        charts['web_upstream_{0}_{1}_net'.format(wu.name, peer.server)] = {
            'options': [None, 'Peer "{0}" Traffic'.format(peer.real_server), 'kilobits/s', family,
                        'nginx_plus.web_upstream_peer_traffic', 'area'],
            'lines': [
                ['{0}_{1}_received'.format(wu.name, peer.server), 'received', 'incremental', 1, 1000],
                ['{0}_{1}_sent'.format(wu.name, peer.server), 'sent', 'incremental', -1, 1000]
            ]
        }
    # Header/response timings per peer (only reported for alive peers)
    for peer in wu:
        charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
            'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family,
                        'nginx_plus.web_upstream_peer_timings', 'line'],
            'lines': [
                ['_'.join([wu.name, peer.server, 'header_time']), 'header'],
                ['_'.join([wu.name, peer.server, 'response_time']), 'response']
            ]
        }
    # Shared-zone memory usage (percentage pre-scaled by 100)
    charts['web_upstream_%s_memory_usage' % wu.name] = {
        'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
        'lines': [
            ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
        ]
    }
    # Peer up/down state (1 = up, 0 = anything else)
    charts['web_upstream_%s_status' % wu.name] = {
        'options': [None, 'Peers Status', 'state', family, 'nginx_plus.web_upstream_status', 'line'],
        'lines': per_peer('state')
    }
    # Accumulated downtime per peer (API reports milliseconds, hence div 1000)
    charts['web_upstream_%s_downtime' % wu.name] = {
        'options': [None, 'Peers Downtime', 'seconds', family, 'nginx_plus.web_upstream_peer_downtime', 'line'],
        'lines': per_peer('downtime', div=1000)
    }

    return charts
+
+
# Dotted paths of the metrics extracted from the NGINX Plus status JSON by
# parse_json(); dots become '_' in the flattened keys.  Paths absent from a
# given response are silently skipped.
METRICS = {
    'SERVER': [
        'processes.respawned',
        'connections.accepted',
        'connections.dropped',
        'connections.active',
        'connections.idle',
        'ssl.handshakes',
        'ssl.handshakes_failed',
        'ssl.session_reuses',
        'requests.total',
        'requests.current',
        'slabs.SSL.pages.free',
        'slabs.SSL.pages.used'
    ],
    'WEB_ZONE': [
        'processing',
        'requests',
        'responses.1xx',
        'responses.2xx',
        'responses.3xx',
        'responses.4xx',
        'responses.5xx',
        'discarded',
        'received',
        'sent'
    ],
    'WEB_UPSTREAM_PEER': [
        'id',
        'server',
        'name',
        'state',
        'active',
        'max_conns',
        'requests',
        'header_time',  # alive only
        'response_time',  # alive only
        'responses.1xx',
        'responses.2xx',
        'responses.3xx',
        'responses.4xx',
        'responses.5xx',
        'sent',
        'received',
        'downtime'
    ],
    # Per-peer metrics summed across peers in WebUpstream.summary_stats()
    'WEB_UPSTREAM_SUMMARY': [
        'responses.1xx',
        'responses.2xx',
        'responses.3xx',
        'responses.4xx',
        'responses.5xx',
        'sent',
        'received'
    ],
    'CACHE': [
        'hit.bytes',  # served
        'miss.bytes_written',  # written
        'miss.bytes'  # bypass

    ]
}

# Characters not allowed in netdata metric/chart ids; runs of them are
# collapsed to a single '_' when sanitizing zone/upstream/cache/peer names.
BAD_SYMBOLS = re.compile(r'[:/.-]+')
+
+
class Cache:
    """One entry of the status JSON's 'caches' object."""
    key = 'caches'
    charts = cache_charts

    def __init__(self, **kw):
        self.real_name = kw['name']
        # sanitized name used in metric ids and chart ids
        self.name = BAD_SYMBOLS.sub('_', self.real_name)

    def memory_usage(self, data):
        """Return the cache's shared-zone usage as a percentage scaled by 100."""
        pages = data['slabs'][self.real_name]['pages']
        return pages['used'] / float(pages['free'] + pages['used']) * 1e4

    def get_data(self, raw_data):
        """Return this cache's metrics keyed '<sanitized name>_<metric>'."""
        stats = parse_json(raw_data['caches'][self.real_name], METRICS['CACHE'])
        stats['memory_usage'] = self.memory_usage(raw_data)
        return dict(('_'.join([self.name, k]), v) for k, v in stats.items())
+
+
class WebZone:
    """One entry of the status JSON's 'server_zones' object."""
    key = 'server_zones'
    charts = web_zone_charts

    def __init__(self, **kw):
        self.real_name = kw['name']
        # sanitized name used in metric ids and chart ids
        self.name = BAD_SYMBOLS.sub('_', self.real_name)

    def get_data(self, raw_data):
        """Return this zone's metrics keyed '<sanitized name>_<metric>'."""
        stats = parse_json(raw_data['server_zones'][self.real_name], METRICS['WEB_ZONE'])
        return {'_'.join((self.name, k)): v for k, v in stats.items()}
+
+
class WebUpstream:
    """One entry of the status JSON's 'upstreams' object, with its peers."""
    key = 'upstreams'
    charts = web_upstream_charts

    def __init__(self, **kw):
        self.real_name = kw['name']
        # sanitized name used in metric ids and chart ids
        self.name = BAD_SYMBOLS.sub('_', self.real_name)
        self.peers = OrderedDict()

        peers = kw['response']['upstreams'][self.real_name]['peers']
        for peer in peers:
            self.add_peer(peer['id'], peer['server'])

    def __iter__(self):
        return iter(self.peers.values())

    def add_peer(self, idx, server):
        """Register a peer by its API id and server address; returns the peer."""
        peer = WebUpstreamPeer(idx, server)
        self.peers[peer.real_server] = peer
        return peer

    def peers_stats(self, peers):
        """Collect metrics from every known, still-present peer.

        Peers that disappeared from the API response are marked inactive and
        skipped on subsequent collections.
        """
        peers = {int(peer['id']): peer for peer in peers}
        data = dict()
        for peer in self.peers.values():
            if not peer.active:
                continue
            try:
                data.update(peer.get_data(peers[peer.id]))
            except KeyError:
                peer.active = False
        return data

    def memory_usage(self, data):
        """Return the upstream's shared-zone usage as a percentage scaled by 100."""
        pages = data['slabs'][self.real_name]['pages']
        return pages['used'] / float(pages['free'] + pages['used']) * 1e4

    def summary_stats(self, data):
        """Sum selected per-peer metrics (already collected into *data*)
        across all active peers.
        """
        rv = defaultdict(int)
        for metric in METRICS['WEB_UPSTREAM_SUMMARY']:
            # 'responses.2xx' -> 'responses_2xx', matching peers_stats() keys.
            # FIX: the conversion is loop-invariant w.r.t. peers — the original
            # recomputed it inside the peer loop, clobbering the outer loop
            # variable 'metric' in the process.
            key = '_'.join(metric.split('.'))
            for peer in self.peers.values():
                if peer.active:
                    rv[key] += data['_'.join([peer.server, key])]
        return rv

    def get_data(self, raw_data):
        """Return per-peer, summary and memory metrics, keys prefixed with
        the sanitized upstream name.
        """
        data = dict()
        peers = raw_data['upstreams'][self.real_name]['peers']
        data.update(self.peers_stats(peers))
        data.update(self.summary_stats(data))
        data['memory_usage'] = self.memory_usage(raw_data)
        return dict(('_'.join([self.name, k]), v) for k, v in data.items())
+
+
class WebUpstreamPeer:
    """A single upstream peer; tracks whether it is still present in the API output."""

    def __init__(self, idx, server):
        self.id = idx
        self.real_server = server
        # sanitized address used in metric ids and chart ids
        self.server = BAD_SYMBOLS.sub('_', self.real_server)
        self.active = True

    def get_data(self, raw):
        """Return this peer's metrics keyed '<sanitized server>_<metric>'."""
        # Defaults for fields the API omits for down peers / unlimited conns.
        stats = {'header_time': 0, 'response_time': 0, 'max_conns': 0}
        stats.update(parse_json(raw, METRICS['WEB_UPSTREAM_PEER']))
        max_conns = stats['max_conns']
        # percentage scaled by 100; 0 when no connection limit is configured
        stats['connections_usage'] = stats['active'] / float(max_conns) * 1e4 if max_conns else 0
        stats['state'] = int(stats['state'] == 'up')
        return {'_'.join((self.server, k)): v for k, v in stats.items()}
+
+
class Service(UrlService):
    def __init__(self, configuration=None, name=None):
        UrlService.__init__(self, configuration=configuration, name=name)
        # copies: per-object charts are appended at check() time
        self.order = list(ORDER)
        self.definitions = deepcopy(CHARTS)
        self.objects = dict()  # real name -> WebZone/WebUpstream/Cache

    def check(self):
        """Validate the configuration, probe the status URL once, and register
        charts for every zone/upstream/cache found in the response.
        """
        if not self.url:
            self.error('URL is not defined')
            return None

        self._manager = self._build_manager()
        if not self._manager:
            return None

        raw = self._get_raw_data()
        if not raw:
            return None

        try:
            response = loads(raw)
        except ValueError:
            return None

        for cls in (WebZone, WebUpstream, Cache):
            for obj_name in response.get(cls.key, list()):
                obj = cls(name=obj_name, response=response)
                self.objects[obj.real_name] = obj
                for chart_id, chart in cls.charts(obj).items():
                    self.order.append(chart_id)
                    self.definitions[chart_id] = chart

        # succeed only if at least one monitorable object was discovered
        return bool(self.objects)

    def _get_data(self):
        """
        Format data received from http request
        :return: dict
        """
        raw = self._get_raw_data()
        if not raw:
            return None
        response = loads(raw)

        data = parse_json(response, METRICS['SERVER'])
        data['ssl_memory_usage'] = data['slabs_SSL_pages_used'] / float(data['slabs_SSL_pages_free']) * 1e4

        # collect from objects still present in this response
        for obj in self.objects.values():
            if obj.real_name in response[obj.key]:
                data.update(obj.get_data(response))

        return data
+
+
def parse_json(raw_data, metrics):
    """Flatten selected dotted-path metrics out of a nested mapping.

    Each entry in *metrics* is a dotted path (e.g. 'responses.2xx') resolved
    against *raw_data*; paths missing from the mapping are silently skipped.
    Returned keys use '_' in place of '.'.
    """
    flat = dict()
    for path in metrics:
        keys = path.split('.')
        node = raw_data
        try:
            for key in keys:
                node = node[key]
        except KeyError:
            continue
        flat['_'.join(keys)] = node
    return flat
diff --git a/collectors/python.d.plugin/nginx_plus/nginx_plus.conf b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
new file mode 100644
index 0000000..201eb0e
--- /dev/null
+++ b/collectors/python.d.plugin/nginx_plus/nginx_plus.conf
@@ -0,0 +1,85 @@
+# netdata python.d.plugin configuration for nginx_plus
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# Additionally to the above, nginx_plus also supports the following:
+#
+# url: 'URL' # the URL to fetch nginx_plus's stats
+#
+# if the URL is password protected, the following are supported:
+#
+# user: 'username'
+# pass: 'password'
+
+# ----------------------------------------------------------------------
+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+localhost:
+ name : 'local'
+ url : 'http://localhost/status'
+
+localipv4:
+ name : 'local'
+ url : 'http://127.0.0.1/status'
+
+localipv6:
+ name : 'local'
+ url : 'http://[::1]/status'