author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-04 14:31:17 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-04 14:31:17 +0000
commit     8020f71afd34d7696d7933659df2d763ab05542f (patch)
tree       2fdf1b5447ffd8bdd61e702ca183e814afdcb4fc /collectors/python.d.plugin/varnish
parent     Initial commit. (diff)
Adding upstream version 1.37.1. (upstream/1.37.1, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/python.d.plugin/varnish')
-rw-r--r--  collectors/python.d.plugin/varnish/Makefile.inc       13
-rw-r--r--  collectors/python.d.plugin/varnish/README.md           65
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.chart.py   385
-rw-r--r--  collectors/python.d.plugin/varnish/varnish.conf        66
4 files changed, 529 insertions, 0 deletions
diff --git a/collectors/python.d.plugin/varnish/Makefile.inc b/collectors/python.d.plugin/varnish/Makefile.inc
new file mode 100644
index 0000000..2469b05
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA += varnish/varnish.chart.py
+dist_pythonconfig_DATA += varnish/varnish.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA += varnish/README.md varnish/Makefile.inc
+
diff --git a/collectors/python.d.plugin/varnish/README.md b/collectors/python.d.plugin/varnish/README.md
new file mode 100644
index 0000000..018905f
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/README.md
@@ -0,0 +1,65 @@
+<!--
+title: "Varnish Cache monitoring with Netdata"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/python.d.plugin/varnish/README.md
+sidebar_label: "Varnish Cache"
+-->
+
+# Varnish Cache monitoring with Netdata
+
+Provides global HTTP accelerator statistics, as well as per-backend (VBE) and per-storage (SMF, SMA, MSE) statistics, using the `varnishstat` tool.
+
+Note that both Varnish-Cache (free and open source) and Varnish-Plus (the commercial/enterprise version) are supported.
+
+## Requirements
+
+- `netdata` user must be a member of the `varnish` group
+
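+For example, a minimal sketch (this assumes a typical Linux install where the Varnish package created a `varnish` group and Netdata runs as the `netdata` user):
+
+```bash
+sudo usermod -aG varnish netdata
+# restart the Netdata agent so the new group membership takes effect
+sudo systemctl restart netdata
+```
+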
+## Charts
+
+This module produces the following charts:
+
+- Connections Statistics in `connections/s`
+- Client Requests in `requests/s`
+- All History Hit Rate Ratio in `percent`
+- Current Poll Hit Rate Ratio in `percent`
+- Expired Objects in `expired/s`
+- Least Recently Used Nuked Objects in `nuked/s`
+- Number Of Threads In All Pools in `number`
+- Threads Statistics in `threads/s`
+- Current Queue Length in `requests`
+- Backend Connections Statistics in `connections/s`
+- Requests To The Backend in `requests/s`
+- ESI Statistics in `problems/s`
+- Memory Usage in `MiB`
+- Uptime in `seconds`
+
+For every backend (VBE):
+
+- Backend Response Statistics in `kilobits/s`
+
+For every storage (SMF, SMA, or MSE):
+
+- Storage Usage in `KiB`
+- Storage Allocated Objects
+
+## Configuration
+
+Edit the `python.d/varnish.conf` configuration file using `edit-config` from the Netdata [config
+directory](/docs/configure/nodes.md), which is typically at `/etc/netdata`.
+
+```bash
+cd /etc/netdata # Replace this path with your Netdata config directory, if different
+sudo ./edit-config python.d/varnish.conf
+```
+
+Only one parameter is supported:
+
+```yaml
+instance_name: 'name'
+```
+
+The name of the `varnishd` instance to collect statistics from. If not specified, the host name is used.
+
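+For example, a minimal job definition (the job name and instance name below are illustrative):
+
+```yaml
+local:
+  instance_name: 'varnish01'
+```
+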
+---
+
+
diff --git a/collectors/python.d.plugin/varnish/varnish.chart.py b/collectors/python.d.plugin/varnish/varnish.chart.py
new file mode 100644
index 0000000..506ad02
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.chart.py
@@ -0,0 +1,385 @@
+# -*- coding: utf-8 -*-
+# Description: varnish netdata python.d module
+# Author: ilyam8
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+import re
+
+from bases.FrameworkServices.ExecutableService import ExecutableService
+from bases.collection import find_binary
+
+ORDER = [
+ 'session_connections',
+ 'client_requests',
+ 'all_time_hit_rate',
+ 'current_poll_hit_rate',
+ 'cached_objects_expired',
+ 'cached_objects_nuked',
+ 'threads_total',
+ 'threads_statistics',
+ 'threads_queue_len',
+ 'backend_connections',
+ 'backend_requests',
+ 'esi_statistics',
+ 'memory_usage',
+ 'uptime'
+]
+
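+# Chart definitions follow the python.d.plugin conventions:
+#   'options': [name, title, units, family, context, chart_type]
+#   'lines':   [[dimension_id, name, algorithm, multiplier, divisor], ...]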
+CHARTS = {
+ 'session_connections': {
+ 'options': [None, 'Connections Statistics', 'connections/s',
+ 'client metrics', 'varnish.session_connection', 'line'],
+ 'lines': [
+ ['sess_conn', 'accepted', 'incremental'],
+ ['sess_dropped', 'dropped', 'incremental']
+ ]
+ },
+ 'client_requests': {
+ 'options': [None, 'Client Requests', 'requests/s',
+ 'client metrics', 'varnish.client_requests', 'line'],
+ 'lines': [
+ ['client_req', 'received', 'incremental']
+ ]
+ },
+ 'all_time_hit_rate': {
+ 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.all_time_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-absolute-row'],
+ ['cache_miss', 'miss', 'percentage-of-absolute-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
+ },
+ 'current_poll_hit_rate': {
+ 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
+ 'varnish.current_poll_hit_rate', 'stacked'],
+ 'lines': [
+ ['cache_hit', 'hit', 'percentage-of-incremental-row'],
+ ['cache_miss', 'miss', 'percentage-of-incremental-row'],
+ ['cache_hitpass', 'hitpass', 'percentage-of-incremental-row']
+ ]
+ },
+ 'cached_objects_expired': {
+ 'options': [None, 'Expired Objects', 'expired/s', 'cache performance',
+ 'varnish.cached_objects_expired', 'line'],
+ 'lines': [
+ ['n_expired', 'objects', 'incremental']
+ ]
+ },
+ 'cached_objects_nuked': {
+ 'options': [None, 'Least Recently Used Nuked Objects', 'nuked/s', 'cache performance',
+ 'varnish.cached_objects_nuked', 'line'],
+ 'lines': [
+ ['n_lru_nuked', 'objects', 'incremental']
+ ]
+ },
+ 'threads_total': {
+ 'options': [None, 'Number Of Threads In All Pools', 'number', 'thread related metrics',
+ 'varnish.threads_total', 'line'],
+ 'lines': [
+ ['threads', None, 'absolute']
+ ]
+ },
+ 'threads_statistics': {
+ 'options': [None, 'Threads Statistics', 'threads/s', 'thread related metrics',
+ 'varnish.threads_statistics', 'line'],
+ 'lines': [
+ ['threads_created', 'created', 'incremental'],
+ ['threads_failed', 'failed', 'incremental'],
+ ['threads_limited', 'limited', 'incremental']
+ ]
+ },
+ 'threads_queue_len': {
+ 'options': [None, 'Current Queue Length', 'requests', 'thread related metrics',
+ 'varnish.threads_queue_len', 'line'],
+ 'lines': [
+ ['thread_queue_len', 'in queue']
+ ]
+ },
+ 'backend_connections': {
+ 'options': [None, 'Backend Connections Statistics', 'connections/s', 'backend metrics',
+ 'varnish.backend_connections', 'line'],
+ 'lines': [
+ ['backend_conn', 'successful', 'incremental'],
+ ['backend_unhealthy', 'unhealthy', 'incremental'],
+ ['backend_reuse', 'reused', 'incremental'],
+ ['backend_toolate', 'closed', 'incremental'],
+ ['backend_recycle', 'recycled', 'incremental'],
+ ['backend_fail', 'failed', 'incremental']
+ ]
+ },
+ 'backend_requests': {
+ 'options': [None, 'Requests To The Backend', 'requests/s', 'backend metrics',
+ 'varnish.backend_requests', 'line'],
+ 'lines': [
+ ['backend_req', 'sent', 'incremental']
+ ]
+ },
+ 'esi_statistics': {
+ 'options': [None, 'ESI Statistics', 'problems/s', 'esi related metrics', 'varnish.esi_statistics', 'line'],
+ 'lines': [
+ ['esi_errors', 'errors', 'incremental'],
+ ['esi_warnings', 'warnings', 'incremental']
+ ]
+ },
+ 'memory_usage': {
+ 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
+ 'lines': [
+ ['memory_free', 'free', 'absolute', 1, 1 << 20],
+ ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]
+ },
+ 'uptime': {
+ 'lines': [
+ ['uptime', None, 'absolute']
+ ],
+ 'options': [None, 'Uptime', 'seconds', 'uptime', 'varnish.uptime', 'line']
+ }
+}
+
+
+def backend_charts_template(name):
+ order = [
+ '{0}_response_statistics'.format(name),
+ ]
+
+ charts = {
+ order[0]: {
+ 'options': [None, 'Backend "{0}"'.format(name), 'kilobits/s', 'backend response statistics',
+ 'varnish.backend', 'area'],
+ 'lines': [
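+                # bytes/s -> kilobits/s: multiplier 8, divisor 1000;
+                # the body dimension uses -8 so it is drawn mirrored below zero on the area chart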
+ ['{0}_beresp_hdrbytes'.format(name), 'header', 'incremental', 8, 1000],
+ ['{0}_beresp_bodybytes'.format(name), 'body', 'incremental', -8, 1000]
+ ]
+ },
+ }
+
+ return order, charts
+
+
+def storage_charts_template(name):
+ order = [
+ 'storage_{0}_usage'.format(name),
+ 'storage_{0}_alloc_objs'.format(name)
+ ]
+
+ charts = {
+ order[0]: {
+            'options': [None, 'Storage "{0}" Usage'.format(name), 'KiB', 'storage usage',
+                        'varnish.storage_usage', 'stacked'],
+ 'lines': [
+ ['{0}.g_space'.format(name), 'free', 'absolute', 1, 1 << 10],
+ ['{0}.g_bytes'.format(name), 'allocated', 'absolute', 1, 1 << 10]
+ ]
+ },
+ order[1]: {
+            'options': [None, 'Storage "{0}" Allocated Objects'.format(name), 'objects', 'storage usage',
+                        'varnish.storage_alloc_objs', 'line'],
+ 'lines': [
+ ['{0}.g_alloc'.format(name), 'allocated', 'absolute']
+ ]
+ }
+ }
+
+ return order, charts
+
+
+VARNISHSTAT = 'varnishstat'
+
+re_version = re.compile(r'varnish-(?:plus-)?(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)')
+
+
+class VarnishVersion:
+ def __init__(self, major, minor, patch):
+ self.major = major
+ self.minor = minor
+ self.patch = patch
+
+ def __str__(self):
+ return '{0}.{1}.{2}'.format(self.major, self.minor, self.patch)
+
+
+class Parser:
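+    # Turns raw `varnishstat -1` output into 3-tuples using the regexes below.
+    # Illustrative input lines (names and values are examples only):
+    #   VBE.default(127.0.0.1,,8080).beresp_hdrbytes     212375    -> _backend_new
+    #   VBE.boot.default.beresp_bodybytes               6291253    -> _backend_old
+    #   SMA.s0.g_space                                268435456    -> _default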
+ _backend_new = re.compile(r'VBE.([\d\w_.]+)\(.*?\).(beresp[\w_]+)\s+(\d+)')
+ _backend_old = re.compile(r'VBE\.[\d\w-]+\.([\w\d_-]+).(beresp[\w_]+)\s+(\d+)')
+ _default = re.compile(r'([A-Z]+\.)?([\d\w_.]+)\s+(\d+)')
+
+ def __init__(self):
+ self.re_default = None
+ self.re_backend = None
+
+ def init(self, data):
+ data = ''.join(data)
+ parsed_main = Parser._default.findall(data)
+ if parsed_main:
+ self.re_default = Parser._default
+
+ parsed_backend = Parser._backend_new.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_new
+ else:
+ parsed_backend = Parser._backend_old.findall(data)
+ if parsed_backend:
+ self.re_backend = Parser._backend_old
+
+ def server_stats(self, data):
+ return self.re_default.findall(''.join(data))
+
+ def backend_stats(self, data):
+ return self.re_backend.findall(''.join(data))
+
+
+class Service(ExecutableService):
+ def __init__(self, configuration=None, name=None):
+ ExecutableService.__init__(self, configuration=configuration, name=name)
+ self.order = ORDER
+ self.definitions = CHARTS
+ self.instance_name = configuration.get('instance_name')
+ self.parser = Parser()
+ self.command = None
+ self.collected_vbe = set()
+ self.collected_storages = set()
+
+ def create_command(self):
+ varnishstat = find_binary(VARNISHSTAT)
+
+ if not varnishstat:
+ self.error("can't locate '{0}' binary or binary is not executable by user netdata".format(VARNISHSTAT))
+ return False
+
+ command = [varnishstat, '-V']
+ reply = self._get_raw_data(stderr=True, command=command)
+ if not reply:
+ self.error(
+ "no output from '{0}'. Is varnish running? Not enough privileges?".format(' '.join(self.command)))
+ return False
+
+ ver = parse_varnish_version(reply)
+ if not ver:
+ self.error("failed to parse reply from '{0}', used regex :'{1}', reply : {2}".format(
+ ' '.join(command), re_version.pattern, reply))
+ return False
+
+ if self.instance_name:
+ self.command = [varnishstat, '-1', '-n', self.instance_name]
+ else:
+ self.command = [varnishstat, '-1']
+
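+        # varnishstat v5+ supports '-t <seconds>' (timeout while waiting for a running
+        # varnishd); use 1 second so the collector fails fast instead of blocking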
+ if ver.major > 4:
+ self.command.extend(['-t', '1'])
+
+ self.info("varnish version: {0}, will use command: '{1}'".format(ver, ' '.join(self.command)))
+
+ return True
+
+ def check(self):
+ if not self.create_command():
+ return False
+
+ # STDOUT is not empty
+ reply = self._get_raw_data()
+ if not reply:
+ self.error("no output from '{0}'. Is it running? Not enough privileges?".format(' '.join(self.command)))
+ return False
+
+ self.parser.init(reply)
+
+ # Output is parsable
+ if not self.parser.re_default:
+            self.error("can't parse the output...")
+ return False
+
+ return True
+
+ def get_data(self):
+ """
+ Format data received from shell command
+ :return: dict
+ """
+ raw = self._get_raw_data()
+ if not raw:
+ return None
+
+ data = dict()
+ server_stats = self.parser.server_stats(raw)
+ if not server_stats:
+ return None
+
+ stats = dict((param, value) for _, param, value in server_stats)
+ data.update(stats)
+
+ self.get_vbe_backends(data, raw)
+ self.get_storages(server_stats)
+
+ # varnish 5 uses default.g_bytes and default.g_space
+ data['memory_allocated'] = data.get('s0.g_bytes') or data.get('default.g_bytes')
+ data['memory_free'] = data.get('s0.g_space') or data.get('default.g_space')
+
+ return data
+
+ def get_vbe_backends(self, data, raw):
+ if not self.parser.re_backend:
+ return
+ stats = self.parser.backend_stats(raw)
+ if not stats:
+ return
+
+ for (name, param, value) in stats:
+ data['_'.join([name, param])] = value
+ if name in self.collected_vbe:
+ continue
+ self.collected_vbe.add(name)
+ self.add_backend_charts(name)
+
+ def get_storages(self, server_stats):
+ # Storage types:
+ # - SMF: File Storage
+ # - SMA: Malloc Storage
+ # - MSE: Massive Storage Engine (Varnish-Plus only)
+ #
+ # Stats example:
+ # [('SMF.', 'ssdStorage.c_req', '47686'),
+ # ('SMF.', 'ssdStorage.c_fail', '0'),
+ # ('SMF.', 'ssdStorage.c_bytes', '668102656'),
+ # ('SMF.', 'ssdStorage.c_freed', '140980224'),
+ # ('SMF.', 'ssdStorage.g_alloc', '39753'),
+ # ('SMF.', 'ssdStorage.g_bytes', '527122432'),
+ # ('SMF.', 'ssdStorage.g_space', '53159968768'),
+ # ('SMF.', 'ssdStorage.g_smf', '40130'),
+ # ('SMF.', 'ssdStorage.g_smf_frag', '311'),
+ # ('SMF.', 'ssdStorage.g_smf_large', '66')]
+        storages = [name for typ, name, _ in server_stats
+                    if typ.startswith(('SMF', 'SMA', 'MSE')) and name.endswith('g_space')]
+ if not storages:
+ return
+ for storage in storages:
+ storage = storage.split('.')[0]
+ if storage in self.collected_storages:
+ continue
+ self.collected_storages.add(storage)
+ self.add_storage_charts(storage)
+
+ def add_backend_charts(self, backend_name):
+ self.add_charts(backend_name, backend_charts_template)
+
+ def add_storage_charts(self, storage_name):
+ self.add_charts(storage_name, storage_charts_template)
+
+ def add_charts(self, name, charts_template):
+ order, charts = charts_template(name)
+
+ for chart_name in order:
+ params = [chart_name] + charts[chart_name]['options']
+ dimensions = charts[chart_name]['lines']
+
+ new_chart = self.charts.add_chart(params)
+ for dimension in dimensions:
+ new_chart.add_dimension(dimension)
+
+
+def parse_varnish_version(lines):
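+    # Expects the output lines of `varnishstat -V`; the first line typically looks like
+    # (illustrative): varnishstat (varnish-6.0.8 revision <hash>)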
+ m = re_version.search(lines[0])
+ if not m:
+ return None
+
+ m = m.groupdict()
+ return VarnishVersion(
+ int(m['major']),
+ int(m['minor']),
+ int(m['patch']),
+ )
diff --git a/collectors/python.d.plugin/varnish/varnish.conf b/collectors/python.d.plugin/varnish/varnish.conf
new file mode 100644
index 0000000..54bfe4d
--- /dev/null
+++ b/collectors/python.d.plugin/varnish/varnish.conf
@@ -0,0 +1,66 @@
+# netdata python.d.plugin configuration for varnish
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+# - global variables
+# - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# penalty indicates whether to apply penalty to update_every in case of failures.
+# Penalty will increase every 5 failed updates in a row. Maximum penalty is 10 minutes.
+# penalty: yes
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+# name: myname # the JOB's name as it will appear at the
+# # dashboard (by default is the job_name)
+# # JOBs sharing a name are mutually exclusive
+# update_every: 1 # the JOB's data collection frequency
+# priority: 60000 # the JOB's order on the dashboard
+# penalty: yes # the JOB's penalty
+# autodetection_retry: 0 # the JOB's re-check interval in seconds
+#
+# In addition to the above, varnish also supports the following:
+#
+# instance_name: 'name' # the name of the varnishd instance to collect statistics from. If not specified, the host name is used.
+#
+# ----------------------------------------------------------------------
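+#
+# An example job definition (commented out; the names below are illustrative):
+#
+# local:
+#   instance_name: 'varnish01'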