From a64a253794ac64cb40befee54db53bde17dd0d49 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 7 Nov 2018 13:19:29 +0100
Subject: New upstream version 1.11.0+dfsg

Signed-off-by: Daniel Baumann
---
 collectors/python.d.plugin/proxysql/Makefile.inc   |  13 +
 collectors/python.d.plugin/proxysql/README.md      |  62 ++++
 .../python.d.plugin/proxysql/proxysql.chart.py     | 356 +++++++++++++++++++++
 collectors/python.d.plugin/proxysql/proxysql.conf  | 118 +++++++
 4 files changed, 549 insertions(+)
 create mode 100644 collectors/python.d.plugin/proxysql/Makefile.inc
 create mode 100644 collectors/python.d.plugin/proxysql/README.md
 create mode 100644 collectors/python.d.plugin/proxysql/proxysql.chart.py
 create mode 100644 collectors/python.d.plugin/proxysql/proxysql.conf

diff --git a/collectors/python.d.plugin/proxysql/Makefile.inc b/collectors/python.d.plugin/proxysql/Makefile.inc
new file mode 100644
index 000000000..66be372ce
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/Makefile.inc
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# THIS IS NOT A COMPLETE Makefile
+# IT IS INCLUDED BY ITS PARENT'S Makefile.am
+# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
+
+# install these files
+dist_python_DATA       += proxysql/proxysql.chart.py
+dist_pythonconfig_DATA += proxysql/proxysql.conf
+
+# do not install these files, but include them in the distribution
+dist_noinst_DATA       += proxysql/README.md proxysql/Makefile.inc
+
diff --git a/collectors/python.d.plugin/proxysql/README.md b/collectors/python.d.plugin/proxysql/README.md
new file mode 100644
index 000000000..02388276e
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/README.md
@@ -0,0 +1,62 @@
+# proxysql
+
+This module monitors ProxySQL backend and frontend performance metrics.
+
+It produces:
+
+1. **Connections (frontend)**
+ * connected: number of frontend connections currently connected
+ * aborted: number of frontend connections aborted due to invalid credentials or max_connections reached
+ * non_idle: number of frontend connections that are not currently idle
+ * created: number of frontend connections created
+2. **Questions (frontend)**
+ * questions: total number of queries sent from frontends
+ * slow_queries: number of queries that ran for longer than the threshold in milliseconds defined in the global variable `mysql-long_query_time`
+3. **Overall Bandwidth (backends)**
+ * in
+ * out
+4. **Status (backends)**
+ * Backends
+   * `1=ONLINE`: backend server is fully operational
+   * `2=SHUNNED`: backend server is temporarily taken out of use, either because of too many connection errors within a short time, or because replication lag exceeded the allowed threshold
+   * `3=OFFLINE_SOFT`: when a server is put into OFFLINE_SOFT mode, new incoming connections are no longer accepted, while existing connections are kept until they become inactive. In other words, connections are kept in use until the current transaction completes. This allows a backend to be detached gracefully.
+   * `4=OFFLINE_HARD`: when a server is put into OFFLINE_HARD mode, existing connections are dropped and new incoming connections are not accepted. This is equivalent to deleting the server from a hostgroup, or temporarily taking it out of the hostgroup for maintenance work.
+   * `-1`: unknown status
+5. **Bandwidth (backends)**
+ * Backends
+   * in
+   * out
+6. **Queries (backends)**
+ * Backends
+   * queries
+7. **Latency (backends)**
+ * Backends
+   * ping time
+8. **Pool connections (backends)**
+ * Backends
+   * used: the number of connections currently used by ProxySQL for sending queries to the backend server
+   * free: the number of connections currently free
+   * established/OK: the number of connections established successfully
+   * error: the number of connections that failed to be established
+9. **Commands**
+ * Commands
+   * Count
+   * Duration (total duration for each command)
+10. **Commands Histogram**
+ * Commands
+   * 100us, 500us, ..., 10s, inf: the total number of commands of the given type whose execution time falls between the previous time limit and this one
+
+### configuration
+
+```yaml
+tcpipv4:
+  name     : 'local'
+  user     : 'stats'
+  pass     : 'stats'
+  host     : '127.0.0.1'
+  port     : '6032'
+```
+
+If no configuration is given, the module will fail to run.
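+
+The collector talks to ProxySQL's admin interface, which speaks the MySQL
+protocol on the admin port (6032 by default). As a quick plausibility check
+before enabling the module, you can issue the same kind of query it runs.
+A minimal sketch (the `pymysql` package and the default `stats`/`stats`
+credentials are assumptions for illustration; the module itself connects
+through netdata's bundled MySQLService framework):
+
+```python
+import pymysql
+
+# Connect to the ProxySQL admin interface, not the MySQL traffic port.
+conn = pymysql.connect(host='127.0.0.1', port=6032, user='stats', password='stats')
+try:
+    with conn.cursor() as cursor:
+        # One of the three stats tables the module polls on every update.
+        cursor.execute('SELECT Variable_Name, Variable_Value FROM stats_mysql_global')
+        for name, value in cursor.fetchall():
+            print(name, value)
+finally:
+    conn.close()
+```
+
+If this query fails, the module will fail for the same reason (wrong
+credentials, wrong port, or an unreachable admin interface).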
+
+---
diff --git a/collectors/python.d.plugin/proxysql/proxysql.chart.py b/collectors/python.d.plugin/proxysql/proxysql.chart.py
new file mode 100644
index 000000000..f7e3d49f9
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.chart.py
@@ -0,0 +1,356 @@
+# -*- coding: utf-8 -*-
+# Description: ProxySQL netdata python.d module
+# Author: Ali Borhani (alibo)
+# SPDX-License-Identifier: GPL-3.0+
+
+from bases.FrameworkServices.MySQLService import MySQLService
+
+# default module values (can be overridden per job in `config`)
+# update_every = 3
+priority = 60000
+retries = 60
+
+
+def query(table, *params):
+    return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
+
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_global
+QUERY_GLOBAL = query(
+    "stats_mysql_global",
+    "Variable_Name",
+    "Variable_Value"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_connection_pool
+QUERY_CONNECTION_POOL = query(
+    "stats_mysql_connection_pool",
+    "hostgroup",
+    "srv_host",
+    "srv_port",
+    "status",
+    "ConnUsed",
+    "ConnFree",
+    "ConnOK",
+    "ConnERR",
+    "Queries",
+    "Bytes_data_sent",
+    "Bytes_data_recv",
+    "Latency_us"
+)
+
+# https://github.com/sysown/proxysql/blob/master/doc/admin_tables.md#stats_mysql_commands_counters
+QUERY_COMMANDS = query(
+    "stats_mysql_commands_counters",
+    "Command",
+    "Total_Time_us",
+    "Total_cnt",
+    "cnt_100us",
+    "cnt_500us",
+    "cnt_1ms",
+    "cnt_5ms",
+    "cnt_10ms",
+    "cnt_50ms",
+    "cnt_100ms",
+    "cnt_500ms",
+    "cnt_1s",
+    "cnt_5s",
+    "cnt_10s",
+    "cnt_INFs"
+)
+
+# lowercased Variable_Name values from stats_mysql_global that get charted;
+# 'active_transactions' feeds the active_transactions chart below
+GLOBAL_STATS = [
+    'active_transactions',
+    'client_connections_aborted',
+    'client_connections_connected',
+    'client_connections_created',
+    'client_connections_non_idle',
+    'proxysql_uptime',
+    'questions',
+    'slow_queries'
+]
+
+CONNECTION_POOL_STATS = [
+    'status',
+    'connused',
+    'connfree',
+    'connok',
+    'connerr',
+    'queries',
+    'bytes_data_sent',
+    'bytes_data_recv',
+    'latency_us'
+]
+
+ORDER = [
+    'connections',
+    'active_transactions',
+    'questions',
+    'pool_overall_net',
+    'commands_count',
+    'commands_duration',
+    'pool_status',
+    'pool_net',
+    'pool_queries',
+    'pool_latency',
+    'pool_connection_used',
+    'pool_connection_free',
+    'pool_connection_ok',
+    'pool_connection_error'
+]
+
+HISTOGRAM_ORDER = [
+    '100us',
+    '500us',
+    '1ms',
+    '5ms',
+    '10ms',
+    '50ms',
+    '100ms',
+    '500ms',
+    '1s',
+    '5s',
+    '10s',
+    'inf'
+]
+
+STATUS = {
+    "ONLINE": 1,
+    "SHUNNED": 2,
+    "OFFLINE_SOFT": 3,
+    "OFFLINE_HARD": 4
+}
+
+CHARTS = {
+    'pool_status': {
+        'options': [None, 'ProxySQL Backend Status', 'status', 'status', 'proxysql.pool_status', 'line'],
+        'lines': []
+    },
+    'pool_net': {
+        'options': [None, 'ProxySQL Backend Bandwidth', 'kilobits/s', 'bandwidth', 'proxysql.pool_net', 'area'],
+        'lines': []
+    },
+    'pool_overall_net': {
+        'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
+                    'proxysql.pool_overall_net', 'area'],
+        'lines': [
+            # byte counters are scaled by 8/1024 so the chart plots kilobits/s
+            ['bytes_data_recv', 'in', 'incremental', 8, 1024],
+            ['bytes_data_sent', 'out', 'incremental', -8, 1024]
+        ]
+    },
+    'questions': {
+        'options': [None, 'ProxySQL Frontend Questions', 'questions/s', 'questions', 'proxysql.questions', 'line'],
+        'lines': [
+            ['questions', 'questions', 'incremental'],
+            ['slow_queries', 'slow_queries', 'incremental']
+        ]
+    },
+    'pool_queries': {
+        'options': [None, 'ProxySQL Backend Queries', 'queries/s', 'queries', 'proxysql.queries', 'line'],
+        'lines': []
+    },
+    'active_transactions': {
+        'options': [None, 'ProxySQL Frontend Active Transactions', 'transactions', 'active_transactions',
+                    'proxysql.active_transactions', 'line'],
+        'lines': [
+            ['active_transactions', 'active_transactions', 'absolute']
+        ]
+    },
+    'pool_latency': {
+        'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'],
+        'lines': []
+    },
+    'connections': {
+        'options': [None, 'ProxySQL Frontend Connections', 'connections/s', 'connections', 'proxysql.connections',
+                    'line'],
+        'lines': [
+            ['client_connections_connected', 'connected', 'absolute'],
+            ['client_connections_created', 'created', 'incremental'],
+            ['client_connections_aborted', 'aborted', 'incremental'],
+            ['client_connections_non_idle', 'non_idle', 'absolute']
+        ]
+    },
+    'pool_connection_used': {
+        'options': [None, 'ProxySQL Used Connections', 'connections', 'pool_connections',
+                    'proxysql.pool_used_connections', 'line'],
+        'lines': []
+    },
+    'pool_connection_free': {
+        'options': [None, 'ProxySQL Free Connections', 'connections', 'pool_connections',
+                    'proxysql.pool_free_connections', 'line'],
+        'lines': []
+    },
+    'pool_connection_ok': {
+        'options': [None, 'ProxySQL Established Connections', 'connections', 'pool_connections',
+                    'proxysql.pool_ok_connections', 'line'],
+        'lines': []
+    },
+    'pool_connection_error': {
+        'options': [None, 'ProxySQL Error Connections', 'connections', 'pool_connections',
+                    'proxysql.pool_error_connections', 'line'],
+        'lines': []
+    },
+    'commands_count': {
+        'options': [None, 'ProxySQL Commands', 'commands', 'commands', 'proxysql.commands_count', 'line'],
+        'lines': []
+    },
+    'commands_duration': {
+        'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'],
+        'lines': []
+    }
+}
+
+
+class Service(MySQLService):
+    def __init__(self, configuration=None, name=None):
+        MySQLService.__init__(self, configuration=configuration, name=name)
+        self.order = ORDER
+        self.definitions = CHARTS
+        self.queries = dict(
+            global_status=QUERY_GLOBAL,
+            connection_pool_status=QUERY_CONNECTION_POOL,
+            commands_status=QUERY_COMMANDS
+        )
+
+    def _get_data(self):
+        raw_data = self._get_raw_data(description=True)
+
+        if not raw_data:
+            return None
+
+        to_netdata = dict()
+
+        if 'global_status' in raw_data:
+            global_status = dict(raw_data['global_status'][0])
+            for key in global_status:
+                if key.lower() in GLOBAL_STATS:
+                    to_netdata[key.lower()] = global_status[key]
+
+        if 'connection_pool_status' in raw_data:
+            to_netdata['bytes_data_recv'] = 0
+            to_netdata['bytes_data_sent'] = 0
+
+            for record in raw_data['connection_pool_status'][0]:
+                backend = self.generate_backend(record)
+                name = self.generate_backend_name(backend)
+
+                for key in backend:
+                    if key in CONNECTION_POOL_STATS:
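+                        # Flatten each backend metric into a per-backend dimension ID of
+                        # the form "<hostgroup>_<host>_<port>_<metric>", with dots in the
+                        # host replaced by underscores, e.g. "0_10_0_0_5_3306_queries"
+                        # (values hypothetical; see generate_backend_name below).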
+                        if key == 'status':
+                            backend[key] = self.convert_status(backend[key])
+
+                        if len(self.charts) > 0:
+                            if (name + '_status') not in self.charts['pool_status']:
+                                self.add_backend_dimensions(name)
+
+                        to_netdata["{0}_{1}".format(name, key)] = backend[key]
+
+                        if key == 'bytes_data_recv':
+                            to_netdata['bytes_data_recv'] += int(backend[key])
+
+                        if key == 'bytes_data_sent':
+                            to_netdata['bytes_data_sent'] += int(backend[key])
+
+        if 'commands_status' in raw_data:
+            for record in raw_data['commands_status'][0]:
+                cmd = self.generate_command_stats(record)
+                name = cmd['name']
+
+                if len(self.charts) > 0:
+                    if (name + '_count') not in self.charts['commands_count']:
+                        self.add_command_dimensions(name)
+                        self.add_histogram_chart(cmd)
+
+                to_netdata[name + '_count'] = cmd['count']
+                to_netdata[name + '_duration'] = cmd['duration']
+                for histogram in cmd['histogram']:
+                    dim_id = 'commands_histogram_{0}_{1}'.format(name, histogram)
+                    to_netdata[dim_id] = cmd['histogram'][histogram]
+
+        return to_netdata or None
+
+    def add_backend_dimensions(self, name):
+        self.charts['pool_status'].add_dimension([name + '_status', name, 'absolute'])
+        self.charts['pool_net'].add_dimension([name + '_bytes_data_recv', 'from_' + name, 'incremental', 8, 1024])
+        self.charts['pool_net'].add_dimension([name + '_bytes_data_sent', 'to_' + name, 'incremental', -8, 1024])
+        self.charts['pool_queries'].add_dimension([name + '_queries', name, 'incremental'])
+        # Latency_us is reported in microseconds; divide by 1000 to plot milliseconds.
+        self.charts['pool_latency'].add_dimension([name + '_latency_us', name, 'absolute', 1, 1000])
+        self.charts['pool_connection_used'].add_dimension([name + '_connused', name, 'absolute'])
+        self.charts['pool_connection_free'].add_dimension([name + '_connfree', name, 'absolute'])
+        self.charts['pool_connection_ok'].add_dimension([name + '_connok', name, 'incremental'])
+        self.charts['pool_connection_error'].add_dimension([name + '_connerr', name, 'incremental'])
+
+    def add_command_dimensions(self, cmd):
+        self.charts['commands_count'].add_dimension([cmd + '_count', cmd, 'incremental'])
+        self.charts['commands_duration'].add_dimension([cmd + '_duration', cmd, 'incremental', 1, 1000])
+
+    def add_histogram_chart(self, cmd):
+        chart = self.charts.add_chart(self.histogram_chart(cmd))
+
+        for histogram in HISTOGRAM_ORDER:
+            dim_id = 'commands_histogram_{0}_{1}'.format(cmd['name'], histogram)
+            chart.add_dimension([dim_id, histogram, 'incremental'])
+
+    @staticmethod
+    def histogram_chart(cmd):
+        return [
+            'commands_histogram_' + cmd['name'],
+            None,
+            'ProxySQL {0} Command Histogram'.format(cmd['name'].title()),
+            'commands',
+            'commands_histogram',
+            'proxysql.commands_histogram_' + cmd['name'],
+            'stacked'
+        ]
+
+    @staticmethod
+    def generate_backend(data):
+        return {
+            'hostgroup': data[0],
+            'srv_host': data[1],
+            'srv_port': data[2],
+            'status': data[3],
+            'connused': data[4],
+            'connfree': data[5],
+            'connok': data[6],
+            'connerr': data[7],
+            'queries': data[8],
+            'bytes_data_sent': data[9],
+            'bytes_data_recv': data[10],
+            'latency_us': data[11]
+        }
+
+    @staticmethod
+    def generate_command_stats(data):
+        return {
+            'name': data[0].lower(),
+            'duration': data[1],
+            'count': data[2],
+            'histogram': {
+                '100us': data[3],
+                '500us': data[4],
+                '1ms': data[5],
+                '5ms': data[6],
+                '10ms': data[7],
+                '50ms': data[8],
+                '100ms': data[9],
+                '500ms': data[10],
+                '1s': data[11],
+                '5s': data[12],
+                '10s': data[13],
+                'inf': data[14]
+            }
+        }
+
+    @staticmethod
+    def generate_backend_name(backend):
+        hostgroup = backend['hostgroup'].replace(' ', '_').lower()
+        host = backend['srv_host'].replace('.', '_')
+
+        return "{0}_{1}_{2}".format(hostgroup, host, backend['srv_port'])
+
+    @staticmethod
+    def convert_status(status):
+        # Map ProxySQL's textual backend status to the numeric codes charted
+        # by pool_status; anything unrecognized becomes -1.
+        if status in STATUS:
+            return STATUS[status]
+        return -1
diff --git a/collectors/python.d.plugin/proxysql/proxysql.conf b/collectors/python.d.plugin/proxysql/proxysql.conf
new file mode 100644
index 000000000..d29c2e5be
--- /dev/null
+++ b/collectors/python.d.plugin/proxysql/proxysql.conf
@@ -0,0 +1,118 @@
+# netdata python.d.plugin configuration for ProxySQL
+#
+# This file is in YAML format. Generally the format is:
+#
+# name: value
+#
+# There are 2 sections:
+#  - global variables
+#  - one or more JOBS
+#
+# JOBS allow you to collect values from multiple sources.
+# Each source will have its own set of charts.
+#
+# JOB parameters have to be indented (using spaces only, example below).
+
+# ----------------------------------------------------------------------
+# Global Variables
+# These variables set the defaults for all JOBs, however each JOB
+# may define its own, overriding the defaults.
+
+# update_every sets the default data collection frequency.
+# If unset, the python.d.plugin default is used.
+# update_every: 1
+
+# priority controls the order of charts at the netdata dashboard.
+# Lower numbers move the charts towards the top of the page.
+# If unset, the default for python.d.plugin is used.
+# priority: 60000
+
+# retries sets the number of retries to be made in case of failures.
+# If unset, the default for python.d.plugin is used.
+# Attempts to restore the service are made once every update_every
+# and only if the module has collected values in the past.
+# retries: 60
+
+# autodetection_retry sets the job re-check interval in seconds.
+# The job is not deleted if check fails.
+# Attempts to start the job are made once every autodetection_retry.
+# This feature is disabled by default.
+# autodetection_retry: 0
+
+# ----------------------------------------------------------------------
+# JOBS (data collection sources)
+#
+# The default JOBS share the same *name*. JOBS with the same name
+# are mutually exclusive. Only one of them will be allowed running at
+# any time. This allows autodetection to try several alternatives and
+# pick the one that works.
+#
+# Any number of jobs is supported.
+#
+# All python.d.plugin JOBS (for all its modules) support a set of
+# predefined parameters. These are:
+#
+# job_name:
+#     name: myname            # the JOB's name as it will appear at the
+#                             # dashboard (by default is the job_name)
+#                             # JOBs sharing a name are mutually exclusive
+#     update_every: 1         # the JOB's data collection frequency
+#     priority: 60000         # the JOB's order on the dashboard
+#     retries: 60             # the JOB's number of restoration attempts
+#     autodetection_retry: 0  # the JOB's re-check interval in seconds
+#
+# In addition to the above, proxysql also supports the following:
+#
+#     host: 'IP or HOSTNAME'  # the host to connect to
+#     port: PORT              # the port to connect to
+#
+# in all cases, the following can also be set:
+#
+#     user: 'username'        # the proxysql username to use
+#     pass: 'password'        # the proxysql password to use
+#

+# AUTO-DETECTION JOBS
+# only one of them will run (they have the same name)
+
+tcp:
+  name     : 'local'
+  user     : 'stats'
+  pass     : 'stats'
+  host     : 'localhost'
+  port     : '6032'
+
+tcpipv4:
+  name     : 'local'
+  user     : 'stats'
+  pass     : 'stats'
+  host     : '127.0.0.1'
+  port     : '6032'
+
+tcpipv6:
+  name     : 'local'
+  user     : 'stats'
+  pass     : 'stats'
+  host     : '::1'
+  port     : '6032'
+
+tcp_admin:
+  name     : 'local'
+  user     : 'admin'
+  pass     : 'admin'
+  host     : 'localhost'
+  port     : '6032'
+
+tcpipv4_admin:
+  name     : 'local'
+  user     : 'admin'
+  pass     : 'admin'
+  host     : '127.0.0.1'
+  port     : '6032'
+
+tcpipv6_admin:
+  name     : 'local'
+  user     : 'admin'
+  pass     : 'admin'
+  host     : '::1'
+  port     : '6032'
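
For reference, the dimension IDs used throughout `proxysql.chart.py` follow a single flattening scheme: one ID per backend per metric. A standalone sketch of that mapping, using a hypothetical `stats_mysql_connection_pool` row (values invented for illustration):

```python
# Status codes as mapped by the collector's convert_status(); anything
# unrecognized becomes -1 ("unknown status" in the README).
STATUS = {'ONLINE': 1, 'SHUNNED': 2, 'OFFLINE_SOFT': 3, 'OFFLINE_HARD': 4}

# Hypothetical row from stats_mysql_connection_pool.
backend = {'hostgroup': '0', 'srv_host': '10.0.0.5', 'srv_port': '3306', 'status': 'ONLINE'}

# Backend name: lowercased hostgroup, host with dots replaced by
# underscores, and port, joined by underscores.
name = '{0}_{1}_{2}'.format(
    backend['hostgroup'].replace(' ', '_').lower(),
    backend['srv_host'].replace('.', '_'),
    backend['srv_port'],
)

# Every metric then becomes '<backend name>_<metric>'.
print(name)                                                  # 0_10_0_0_5_3306
print(name + '_status', STATUS.get(backend['status'], -1))   # 0_10_0_0_5_3306_status 1
```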