| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2018-11-07 12:22:44 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2018-11-07 12:22:44 +0000 |
| commit | 1e6c93250172946eeb38e94a92a1fd12c9d3011e (patch) | |
| tree | 8ca5e16dfc7ad6b3bf2738ca0a48408a950f8f7e /collectors/python.d.plugin/unbound | |
| parent | Update watch file (diff) | |
| download | netdata-1e6c93250172946eeb38e94a92a1fd12c9d3011e.tar.xz, netdata-1e6c93250172946eeb38e94a92a1fd12c9d3011e.zip | |
Merging upstream version 1.11.0+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | collectors/python.d.plugin/unbound/Makefile.inc | 13 |
| -rw-r--r-- | collectors/python.d.plugin/unbound/README.md | 76 |
| -rw-r--r-- | collectors/python.d.plugin/unbound/unbound.chart.py | 275 |
| -rw-r--r-- | collectors/python.d.plugin/unbound/unbound.conf | 87 |
4 files changed, 451 insertions, 0 deletions
diff --git a/collectors/python.d.plugin/unbound/Makefile.inc b/collectors/python.d.plugin/unbound/Makefile.inc
new file mode 100644
index 000000000..59c306aed
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += unbound/unbound.chart.py
+dist_pythonconfig_DATA += unbound/unbound.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += unbound/README.md unbound/Makefile.inc
+
diff --git a/collectors/python.d.plugin/unbound/README.md b/collectors/python.d.plugin/unbound/README.md
new file mode 100644
index 000000000..3b4fa16fd
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/README.md
@@ -0,0 +1,76 @@
+# unbound
+
+Monitoring uses the remote control interface to fetch statistics.
+
+Provides the following charts:
+
+1. **Queries Processed**
+ * Ratelimited
+ * Cache Misses
+ * Cache Hits
+ * Expired
+ * Prefetched
+ * Recursive
+
+2. **Request List**
+ * Average Size
+ * Max Size
+ * Overwritten Requests
+ * Overruns
+ * Current Size
+ * User Requests
+
+3. **Recursion Timings**
+ * Average recursion processing time
+ * Median recursion processing time
+
+If extended stats are enabled, also provides:
+
+4. **Cache Sizes**
+ * Message Cache
+ * RRset Cache
+ * Infra Cache
+ * DNSSEC Key Cache
+ * DNSCrypt Shared Secret Cache
+ * DNSCrypt Nonce Cache
+
+### configuration
+
+Unbound must be manually configured to enable the remote-control protocol.
+Check the Unbound documentation for info on how to do this. Additionally,
+if you want to take advantage of the autodetection this plugin offers,
+you will need to make sure your `unbound.conf` file only uses spaces for
+indentation (the default config shipped by most distributions uses tabs
+instead of spaces).
+
+Once you have the Unbound control protocol enabled, you need to make sure
+that either the certificate and key are readable by Netdata (if you're
+using the regular control interface), or that the socket is accessible
+to Netdata (if you're using a UNIX socket for the control interface).
+
+By default, for the local system, everything can be auto-detected
+assuming Unbound is configured correctly and has been told to listen
+on the loopback interface or a UNIX socket. This is done by looking
+up info in the Unbound config file specified by the `ubconf` key.
+
+To enable extended stats for a given job, add `extended: yes` to the
+definition.
+
+You can also enable per-thread charts for a given job by adding
+`per_thread: yes` to the definition. Note that the number of threads
+is only checked on startup.
+
+A basic local configuration with extended statistics and per-thread
+charts looks like this:
+
+```yaml
+local:
+    ubconf: /etc/unbound/unbound.conf
+    extended: yes
+    per_thread: yes
+```
+
+While it's a bit more complicated to set up correctly, it is recommended
+that you use a UNIX socket as it provides far better performance.
+
+---
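As an editorial aside (not part of the commit), here is a rough sketch of what the two sides of the UNIX-socket setup recommended by the README could look like. The socket path `/var/run/unbound.sock` is a placeholder and must match your own `control-interface`; the option names themselves (`extended-statistics`, `remote-control`, `control-enable`, `control-interface`, and the job keys `socket`, `extended`, `per_thread`) are the ones referenced by the README above and the collector code below.

```yaml
# Illustrative unbound.conf excerpt (spaces only, as the README requires).
# The socket path is a placeholder, not a default.
server:
    extended-statistics: yes

remote-control:
    control-enable: yes
    control-interface: /var/run/unbound.sock
```

The matching netdata job, using only keys documented in the collector's own `unbound.conf` further down, could then be:

```yaml
# Illustrative python.d/unbound.conf job; same placeholder socket path.
local:
    socket: /var/run/unbound.sock
    extended: yes
    per_thread: yes
```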
diff --git a/collectors/python.d.plugin/unbound/unbound.chart.py b/collectors/python.d.plugin/unbound/unbound.chart.py
new file mode 100644
index 000000000..52fcbf7e2
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.chart.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# Description: unbound netdata python.d module
+# Author: Austin S. Hemmelgarn (Ferroin)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import os
+import sys
+
+from copy import deepcopy
+
+from bases.FrameworkServices.SocketService import SocketService
+from bases.loaders import YamlOrderedLoader
+
+PRECISION = 1000
+
+ORDER = ['queries', 'recursion', 'reqlist']
+
+CHARTS = {
+    'queries': {
+        'options': [None, 'Queries Processed', 'queries', 'Unbound', 'unbound.queries', 'line'],
+        'lines': [
+            ['ratelimit', 'ratelimited', 'absolute', 1, 1],
+            ['cachemiss', 'cache_miss', 'absolute', 1, 1],
+            ['cachehit', 'cache_hit', 'absolute', 1, 1],
+            ['expired', 'expired', 'absolute', 1, 1],
+            ['prefetch', 'prefetched', 'absolute', 1, 1],
+            ['recursive', 'recursive', 'absolute', 1, 1]
+        ]
+    },
+    'recursion': {
+        'options': [None, 'Recursion Timings', 'seconds', 'Unbound', 'unbound.recursion', 'line'],
+        'lines': [
+            ['recursive_avg', 'average', 'absolute', 1, PRECISION],
+            ['recursive_med', 'median', 'absolute', 1, PRECISION]
+        ]
+    },
+    'reqlist': {
+        'options': [None, 'Request List', 'items', 'Unbound', 'unbound.reqlist', 'line'],
+        'lines': [
+            ['reqlist_avg', 'average_size', 'absolute', 1, 1],
+            ['reqlist_max', 'maximum_size', 'absolute', 1, 1],
+            ['reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+            ['reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+            ['reqlist_current', 'current_size', 'absolute', 1, 1],
+            ['reqlist_user', 'user_requests', 'absolute', 1, 1]
+        ]
+    }
+}
+
+# These get added too if we are told to use extended stats.
+EXTENDED_ORDER = ['cache']
+
+EXTENDED_CHARTS = {
+    'cache': {
+        'options': [None, 'Cache Sizes', 'items', 'Unbound', 'unbound.cache', 'stacked'],
+        'lines': [
+            ['cache_message', 'message_cache', 'absolute', 1, 1],
+            ['cache_rrset', 'rrset_cache', 'absolute', 1, 1],
+            ['cache_infra', 'infra_cache', 'absolute', 1, 1],
+            ['cache_key', 'dnssec_key_cache', 'absolute', 1, 1],
+            ['cache_dnscss', 'dnscrypt_Shared_Secret_cache', 'absolute', 1, 1],
+            ['cache_dnscn', 'dnscrypt_Nonce_cache', 'absolute', 1, 1]
+        ]
+    }
+}
+
+# This is used as a template for the per-thread charts.
+PER_THREAD_CHARTS = {
+    '_queries': {
+        'options': [None, '{longname} Queries Processed', 'queries', 'Queries Processed',
+                    'unbound.threads.queries', 'line'],
+        'lines': [
+            ['{shortname}_ratelimit', 'ratelimited', 'absolute', 1, 1],
+            ['{shortname}_cachemiss', 'cache_miss', 'absolute', 1, 1],
+            ['{shortname}_cachehit', 'cache_hit', 'absolute', 1, 1],
+            ['{shortname}_expired', 'expired', 'absolute', 1, 1],
+            ['{shortname}_prefetch', 'prefetched', 'absolute', 1, 1],
+            ['{shortname}_recursive', 'recursive', 'absolute', 1, 1]
+        ]
+    },
+    '_recursion': {
+        'options': [None, '{longname} Recursion Timings', 'seconds', 'Recursive Timings',
+                    'unbound.threads.recursion', 'line'],
+        'lines': [
+            ['{shortname}_recursive_avg', 'average', 'absolute', 1, PRECISION],
+            ['{shortname}_recursive_med', 'median', 'absolute', 1, PRECISION]
+        ]
+    },
+    '_reqlist': {
+        'options': [None, '{longname} Request List', 'items', 'Request List', 'unbound.threads.reqlist', 'line'],
+        'lines': [
+            ['{shortname}_reqlist_avg', 'average_size', 'absolute', 1, 1],
+            ['{shortname}_reqlist_max', 'maximum_size', 'absolute', 1, 1],
+            ['{shortname}_reqlist_overwritten', 'overwritten_requests', 'absolute', 1, 1],
+            ['{shortname}_reqlist_exceeded', 'overruns', 'absolute', 1, 1],
+            ['{shortname}_reqlist_current', 'current_size', 'absolute', 1, 1],
+            ['{shortname}_reqlist_user', 'user_requests', 'absolute', 1, 1]
+        ]
+    }
+}
+
+
+# This maps the Unbound stat names to our names and precision requirements.
+STAT_MAP = {
+    'total.num.queries_ip_ratelimited': ('ratelimit', 1),
+    'total.num.cachehits': ('cachehit', 1),
+    'total.num.cachemiss': ('cachemiss', 1),
+    'total.num.zero_ttl': ('expired', 1),
+    'total.num.prefetch': ('prefetch', 1),
+    'total.num.recursivereplies': ('recursive', 1),
+    'total.requestlist.avg': ('reqlist_avg', 1),
+    'total.requestlist.max': ('reqlist_max', 1),
+    'total.requestlist.overwritten': ('reqlist_overwritten', 1),
+    'total.requestlist.exceeded': ('reqlist_exceeded', 1),
+    'total.requestlist.current.all': ('reqlist_current', 1),
+    'total.requestlist.current.user': ('reqlist_user', 1),
+    'total.recursion.time.avg': ('recursive_avg', PRECISION),
+    'total.recursion.time.median': ('recursive_med', PRECISION),
+    'msg.cache.count': ('cache_message', 1),
+    'rrset.cache.count': ('cache_rrset', 1),
+    'infra.cache.count': ('cache_infra', 1),
+    'key.cache.count': ('cache_key', 1),
+    'dnscrypt_shared_secret.cache.count': ('cache_dnscss', 1),
+    'dnscrypt_nonce.cache.count': ('cache_dnscn', 1)
+}
+
+# Same as above, but for per-thread stats.
+PER_THREAD_STAT_MAP = {
+    '{shortname}.num.queries_ip_ratelimited': ('{shortname}_ratelimit', 1),
+    '{shortname}.num.cachehits': ('{shortname}_cachehit', 1),
+    '{shortname}.num.cachemiss': ('{shortname}_cachemiss', 1),
+    '{shortname}.num.zero_ttl': ('{shortname}_expired', 1),
+    '{shortname}.num.prefetch': ('{shortname}_prefetch', 1),
+    '{shortname}.num.recursivereplies': ('{shortname}_recursive', 1),
+    '{shortname}.requestlist.avg': ('{shortname}_reqlist_avg', 1),
+    '{shortname}.requestlist.max': ('{shortname}_reqlist_max', 1),
+    '{shortname}.requestlist.overwritten': ('{shortname}_reqlist_overwritten', 1),
+    '{shortname}.requestlist.exceeded': ('{shortname}_reqlist_exceeded', 1),
+    '{shortname}.requestlist.current.all': ('{shortname}_reqlist_current', 1),
+    '{shortname}.requestlist.current.user': ('{shortname}_reqlist_user', 1),
+    '{shortname}.recursion.time.avg': ('{shortname}_recursive_avg', PRECISION),
+    '{shortname}.recursion.time.median': ('{shortname}_recursive_med', PRECISION)
+}
+
+
+# Used to actually generate per-thread charts.
+def _get_perthread_info(thread):
+    sname = 'thread{0}'.format(thread)
+    lname = 'Thread {0}'.format(thread)
+    charts = dict()
+    order = []
+    statmap = dict()
+
+    for item in PER_THREAD_CHARTS:
+        cname = '{0}{1}'.format(sname, item)
+        chart = deepcopy(PER_THREAD_CHARTS[item])
+        chart['options'][1] = chart['options'][1].format(longname=lname)
+
+        for index, line in enumerate(chart['lines']):
+            chart['lines'][index][0] = line[0].format(shortname=sname)
+
+        order.append(cname)
+        charts[cname] = chart
+
+    for key, value in PER_THREAD_STAT_MAP.items():
+        statmap[key.format(shortname=sname)] = (value[0].format(shortname=sname), value[1])
+
+    return (charts, order, statmap)
+
+
+class Service(SocketService):
+    def __init__(self, configuration=None, name=None):
+        # The unbound control protocol is always TLS encapsulated
+        # unless it's used over a UNIX socket, so enable TLS _before_
+        # doing the normal SocketService initialization.
+        configuration['tls'] = True
+        self.port = 8935
+        SocketService.__init__(self, configuration, name)
+        self.ext = self.configuration.get('extended', None)
+        self.ubconf = self.configuration.get('ubconf', None)
+        self.perthread = self.configuration.get('per_thread', False)
+        self.threads = None
+        self.order = deepcopy(ORDER)
+        self.definitions = deepcopy(CHARTS)
+        self.request = 'UBCT1 stats\n'
+        self.statmap = deepcopy(STAT_MAP)
+        self._parse_config()
+        self._auto_config()
+        self.debug('Extended stats: {0}'.format(self.ext))
+        self.debug('Per-thread stats: {0}'.format(self.perthread))
+        if self.ext:
+            self.order = self.order + EXTENDED_ORDER
+            self.definitions.update(EXTENDED_CHARTS)
+        if self.unix_socket:
+            self.debug('Using unix socket: {0}'.format(self.unix_socket))
+        else:
+            self.debug('Connecting to: {0}:{1}'.format(self.host, self.port))
+            self.debug('Using key: {0}'.format(self.key))
+            self.debug('Using certificate: {0}'.format(self.cert))
+
+    def _auto_config(self):
+        if self.ubconf and os.access(self.ubconf, os.R_OK):
+            self.debug('Unbound config: {0}'.format(self.ubconf))
+            conf = YamlOrderedLoader.load_config_from_file(self.ubconf)[0]
+            if self.ext is None:
+                if 'extended-statistics' in conf['server']:
+                    self.ext = conf['server']['extended-statistics']
+            if 'remote-control' in conf:
+                if conf['remote-control'].get('control-use-cert', False):
+                    self.key = self.key or conf['remote-control'].get('control-key-file')
+                    self.cert = self.cert or conf['remote-control'].get('control-cert-file')
+                    self.port = self.port or conf['remote-control'].get('control-port')
+                else:
+                    self.unix_socket = self.unix_socket or conf['remote-control'].get('control-interface')
+        else:
+            self.debug('Unbound configuration not found.')
+        if not self.key:
+            self.key = '/etc/unbound/unbound_control.key'
+        if not self.cert:
+            self.cert = '/etc/unbound/unbound_control.pem'
+        if not self.port:
+            self.port = 8953
+
+    def _generate_perthread_charts(self):
+        tmporder = list()
+        for thread in range(0, self.threads):
+            charts, order, statmap = _get_perthread_info(thread)
+            tmporder.extend(order)
+            self.definitions.update(charts)
+            self.statmap.update(statmap)
+        self.order.extend(sorted(tmporder))
+
+    def check(self):
+        # Check if authentication is working.
+        self._connect()
+        result = bool(self._sock)
+        self._disconnect()
+        # If auth works, and we need per-thread charts, query the server
+        # to see how many threads it's using. This somewhat abuses the
+        # SocketService API to get the data we need.
+        if result and self.perthread:
+            tmp = self.request
+            if sys.version_info[0] < 3:
+                self.request = 'UBCT1 status\n'
+            else:
+                self.request = b'UBCT1 status\n'
+            raw = self._get_raw_data()
+            for line in raw.splitlines():
+                if line.startswith('threads'):
+                    self.threads = int(line.split()[1])
+                    self._generate_perthread_charts()
+                    break
+            if self.threads is None:
+                self.info('Unable to auto-detect thread counts, disabling per-thread stats.')
+                self.perthread = False
+            self.request = tmp
+        return result
+
+    @staticmethod
+    def _check_raw_data(data):
+        # The server will close the connection when it's done sending
+        # data, so just keep looping until that happens.
+        return False
+
+    def _get_data(self):
+        raw = self._get_raw_data()
+        data = dict()
+        tmp = dict()
+        for line in raw.splitlines():
+            stat = line.split('=')
+            tmp[stat[0]] = stat[1]
+        for item in self.statmap:
+            if item in tmp:
+                data[self.statmap[item][0]] = float(tmp[item]) * self.statmap[item][1]
+        return data
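As an illustration of what `_get_data()` above consumes (not part of the commit): the `UBCT1 stats` request returns one `name=value` pair per line, and the names are exactly the keys in `STAT_MAP` and `PER_THREAD_STAT_MAP`. A hypothetical response excerpt, with made-up values, might look like the following; the parser splits each line on `=` and multiplies by the per-stat precision.

```
total.num.cachehits=81342
total.num.cachemiss=23190
total.recursion.time.avg=0.087341
total.requestlist.current.all=12
thread0.num.cachehits=40812
thread0.recursion.time.avg=0.091202
```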
diff --git a/collectors/python.d.plugin/unbound/unbound.conf b/collectors/python.d.plugin/unbound/unbound.conf
new file mode 100644
index 000000000..46c4b097f
--- /dev/null
+++ b/collectors/python.d.plugin/unbound/unbound.conf
@@ -0,0 +1,87 @@
+# netdata python.d.plugin configuration for unbound
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, unbound also supports the following:
+#
+#     host: localhost              # The host to connect to.
+#     port: 8953                   # What port to use (defaults to 8953)
+#     socket: /path/to/socket      # A path to a UNIX socket to use instead
+#                                  # of a TCP connection
+#     tls_key_file: /path/to/key   # The key file to use for authentication
+#     tls_cert_file: /path/to/cert # The certificate to use for authentication
+#     extended: false              # Whether to collect extended stats or not
+#     per_thread: false            # Whether to show charts for per-thread stats
+#
+# In addition to the above, you can set the following to try and
+# auto-detect most settings based on the unbound configuration:
+#
+#     ubconf: /etc/unbound/unbound.conf
+#
+# Note that the SSL key and certificate need to be readable by the user
+# unbound runs as if you're using the regular control interface.
+# If you're using a UNIX socket, that has to be readable by the netdata user.

+# The following should work for most users if they have unbound configured
+# correctly.
+local:
+    ubconf: /etc/unbound/unbound.conf
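To complement the `ubconf`-based default job above, a hypothetical job that connects over the TLS-wrapped TCP control channel instead could look like the sketch below. This is an editorial example, not shipped in the commit: the host is a placeholder, the key and certificate paths are the same defaults the collector falls back to in `_auto_config()`, and every key used is documented in the comments above.

```yaml
# Hypothetical remote job; host is a placeholder address.
remote_unbound:
    host: 203.0.113.10
    port: 8953
    tls_key_file: /etc/unbound/unbound_control.key
    tls_cert_file: /etc/unbound/unbound_control.pem
    extended: yes
```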