author     Federico Ceratto <federico.ceratto@gmail.com>  2018-03-27 21:28:21 +0000
committer  Federico Ceratto <federico.ceratto@gmail.com>  2018-03-27 21:28:21 +0000
commit     d4dd00f58a502c9ca4b63e36ce6bc7a9945dc63c (patch)
tree       faac99f51f182bb8c0a03e95e393d421ac9ddf42 /python.d/postgres.chart.py
parent     New upstream version 1.9.0+dfsg (diff)
New upstream version 1.10.0+dfsg (tag: upstream/1.10.0+dfsg)
Diffstat (limited to 'python.d/postgres.chart.py')
-rw-r--r--  python.d/postgres.chart.py | 344
1 file changed, 305 insertions(+), 39 deletions(-)
diff --git a/python.d/postgres.chart.py b/python.d/postgres.chart.py
index ef69a9c7..0522b193 100644
--- a/python.d/postgres.chart.py
+++ b/python.d/postgres.chart.py
@@ -32,6 +32,8 @@ METRICS = dict(
               'tup_updated',
               'tup_deleted',
               'conflicts',
+              'temp_files',
+              'temp_bytes',
               'size'],
    BACKENDS=['backends_active',
              'backends_idle'],
@@ -39,11 +41,21 @@ METRICS = dict(
                 'index_size'],
    TABLE_STATS=['table_size',
                 'table_count'],
+   WAL=['written_wal',
+        'recycled_wal',
+        'total_wal'],
+   WAL_WRITES=['wal_writes'],
    ARCHIVE=['ready_count',
             'done_count',
             'file_count'],
-   BGWRITER=['writer_scheduled',
-             'writer_requested'],
+   BGWRITER=['checkpoint_scheduled',
+             'checkpoint_requested',
+             'buffers_checkpoint',
+             'buffers_clean',
+             'maxwritten_clean',
+             'buffers_backend',
+             'buffers_alloc',
+             'buffers_backend_fsync'],
    LOCKS=['ExclusiveLock',
           'RowShareLock',
           'SIReadLock',
@@ -52,17 +64,44 @@ METRICS = dict(
           'AccessShareLock',
           'ShareRowExclusiveLock',
           'ShareLock',
-          'RowExclusiveLock']
+          'RowExclusiveLock'],
+   AUTOVACUUM=['analyze',
+               'vacuum_analyze',
+               'vacuum',
+               'vacuum_freeze',
+               'brin_summarize'],
+   STANDBY_DELTA=['sent_delta',
+                  'write_delta',
+                  'flush_delta',
+                  'replay_delta'],
+   REPSLOT_FILES=['replslot_wal_keep',
+                  'replslot_files']
+)

 QUERIES = dict(
+    WAL="""
+SELECT
+    count(*) as total_wal,
+    count(*) FILTER (WHERE type = 'recycled') AS recycled_wal,
+    count(*) FILTER (WHERE type = 'written') AS written_wal
+FROM
+    (SELECT wal.name,
+            pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ),
+            CASE WHEN wal.name > pg_{0}file_name(CASE pg_is_in_recovery() WHEN true THEN NULL ELSE pg_current_{0}_{1}() END ) THEN 'recycled'
+              ELSE 'written'
+            END AS type
+     FROM pg_catalog.pg_ls_dir('pg_{0}') AS wal(name)
+     WHERE name ~ '^[0-9A-F]{{24}}$'
+     ORDER BY (pg_stat_file('pg_{0}/'||name)).modification, wal.name DESC) sub;
+""",
     ARCHIVE="""
 SELECT
     CAST(COUNT(*) AS INT) AS file_count,
     CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)), 0) AS INT) AS ready_count,
     CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)), 0) AS INT) AS done_count
 FROM
-    pg_catalog.pg_ls_dir('{0}/archive_status') AS archive_files (archive_file);
+    pg_catalog.pg_ls_dir('pg_{0}/archive_status') AS archive_files (archive_file);
 """,
     BACKENDS="""
 SELECT
@@ -86,28 +125,37 @@ WHERE relkind = 'i';""",
     DATABASE="""
 SELECT
     datname AS database_name,
-    sum(numbackends) AS connections,
-    sum(xact_commit) AS xact_commit,
-    sum(xact_rollback) AS xact_rollback,
-    sum(blks_read) AS blks_read,
-    sum(blks_hit) AS blks_hit,
-    sum(tup_returned) AS tup_returned,
-    sum(tup_fetched) AS tup_fetched,
-    sum(tup_inserted) AS tup_inserted,
-    sum(tup_updated) AS tup_updated,
-    sum(tup_deleted) AS tup_deleted,
-    sum(conflicts) AS conflicts,
-    pg_database_size(datname) AS size
+    numbackends AS connections,
+    xact_commit AS xact_commit,
+    xact_rollback AS xact_rollback,
+    blks_read AS blks_read,
+    blks_hit AS blks_hit,
+    tup_returned AS tup_returned,
+    tup_fetched AS tup_fetched,
+    tup_inserted AS tup_inserted,
+    tup_updated AS tup_updated,
+    tup_deleted AS tup_deleted,
+    conflicts AS conflicts,
+    pg_database_size(datname) AS size,
+    temp_files AS temp_files,
+    temp_bytes AS temp_bytes
 FROM pg_stat_database
 WHERE datname IN %(databases)s
-GROUP BY datname;
+;
 """,
     BGWRITER="""
 SELECT
-    checkpoints_timed AS writer_scheduled,
-    checkpoints_req AS writer_requested
-FROM pg_stat_bgwriter;""",
-    LOCKS="""
+    checkpoints_timed AS checkpoint_scheduled,
+    checkpoints_req AS checkpoint_requested,
+    buffers_checkpoint * current_setting('block_size')::numeric buffers_checkpoint,
+    buffers_clean * current_setting('block_size')::numeric buffers_clean,
+    maxwritten_clean,
+    buffers_backend * current_setting('block_size')::numeric buffers_backend,
+    buffers_alloc * current_setting('block_size')::numeric buffers_alloc,
+    buffers_backend_fsync
+FROM pg_stat_bgwriter;
+""",
+    LOCKS="""
 SELECT
     pg_database.datname as database_name,
     mode,
@@ -123,12 +171,69 @@ FROM pg_stat_database
 WHERE has_database_privilege((SELECT current_user), datname, 'connect')
 AND NOT datname ~* '^template\d+';
 """,
+    FIND_STANDBY="""
+SELECT application_name
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL
+GROUP BY application_name;
+""",
+    FIND_REPLICATION_SLOT="""
+SELECT slot_name
+FROM pg_replication_slots;
+""",
+    STANDBY_DELTA="""
+SELECT application_name,
+       pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , sent_{1}) AS sent_delta,
+       pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , write_{1}) AS write_delta,
+       pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , flush_{1}) AS flush_delta,
+       pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END , replay_{1}) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+""",
+    REPSLOT_FILES="""
+WITH wal_size AS (
+  SELECT current_setting('wal_block_size')::INT * setting::INT AS val
+  FROM pg_settings
+  WHERE name = 'wal_segment_size'
+)
+SELECT slot_name, slot_type, replslot_wal_keep, count(slot_file) AS replslot_files
+FROM (
+  SELECT slot.slot_name, CASE WHEN slot_file <> 'state' THEN 1 END AS slot_file , slot_type,
+         COALESCE (floor((pg_wal_lsn_diff (pg_current_wal_lsn (),
+                                           slot.restart_lsn) - (pg_walfile_name_offset (restart_lsn)).file_offset) / (s.val)),
+                   0) AS replslot_wal_keep
+  FROM pg_replication_slots slot
+  LEFT JOIN (
+    SELECT slot2.slot_name,
+           pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+    FROM pg_replication_slots slot2
+  ) files (slot_name, slot_file)
+  ON slot.slot_name = files.slot_name
+  CROSS JOIN wal_size s) AS d
+GROUP BY slot_name, slot_type, replslot_wal_keep;
+""",
     IF_SUPERUSER="""
 SELECT current_setting('is_superuser') = 'on' AS is_superuser;
-    """,
+""",
     DETECT_SERVER_VERSION="""
 SHOW server_version_num;
-    """
+""",
+    AUTOVACUUM="""
+SELECT
+    count(*) FILTER (WHERE query LIKE 'autovacuum: ANALYZE%%') AS analyze,
+    count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM ANALYZE%%') AS vacuum_analyze,
+    count(*) FILTER (WHERE query LIKE 'autovacuum: VACUUM%%'
+                     AND query NOT LIKE 'autovacuum: VACUUM ANALYZE%%'
+                     AND query NOT LIKE '%%to prevent wraparound%%') AS vacuum,
+    count(*) FILTER (WHERE query LIKE '%%to prevent wraparound%%') AS vacuum_freeze,
+    count(*) FILTER (WHERE query LIKE 'autovacuum: BRIN summarize%%') AS brin_summarize
+FROM pg_stat_activity
+WHERE query NOT LIKE '%%pg_stat_activity%%';
+""",
+    DIFF_LSN="""
+SELECT pg_{0}_{1}_diff(CASE pg_is_in_recovery() WHEN true THEN pg_last_{0}_receive_{1}() ELSE pg_current_{0}_{1}() END, '0/0') as wal_writes ;
+"""
+
 )
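The new QUERIES entries are written against two naming schemes at once: PostgreSQL 10 renamed "xlog" to "wal" and "location" to "lsn", so WAL, STANDBY_DELTA and DIFF_LSN leave {0}/{1} placeholders that add_additional_queries_() (further down in this diff) fills in per server version; the doubled braces in the WAL query's regex ({{24}}) survive .format() as a literal {24}. A minimal sketch of that substitution, using a hypothetical 10.3 server:

# Sketch (not part of the patch) of the substitution add_additional_queries_()
# performs; DIFF_LSN here is copied from the QUERIES entry above.
DIFF_LSN = ("SELECT pg_{0}_{1}_diff(CASE pg_is_in_recovery() "
            "WHEN true THEN pg_last_{0}_receive_{1}() "
            "ELSE pg_current_{0}_{1}() END, '0/0') as wal_writes ;")

def wal_tokens(server_version_num):
    # PostgreSQL 10 renamed xlog->wal and location->lsn.
    if server_version_num >= 100000:
        return 'wal', 'lsn'        # pg_current_wal_lsn(), pg_wal/
    return 'xlog', 'location'      # pg_current_xlog_location(), pg_xlog/

wal, lsn = wal_tokens(100003)      # hypothetical 10.3 server
print(DIFF_LSN.format(wal, lsn))   # uses pg_wal_lsn_diff()/pg_current_wal_lsn()

On a 9.6 server the same template yields pg_xlog_location_diff() and pg_current_xlog_location() instead.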
@@ -138,8 +243,11 @@ QUERY_STATS = {
     QUERIES['LOCKS']: METRICS['LOCKS']
 }
 
-ORDER = ['db_stat_transactions', 'db_stat_tuple_read', 'db_stat_tuple_returned', 'db_stat_tuple_write', 'database_size',
-         'backend_process', 'index_count', 'index_size', 'table_count', 'table_size', 'wal', 'background_writer']
+ORDER = ['db_stat_temp_files', 'db_stat_temp_bytes', 'db_stat_blks', 'db_stat_tuple_returned', 'db_stat_tuple_write',
+         'db_stat_transactions','db_stat_connections', 'database_size', 'backend_process', 'index_count', 'index_size',
+         'table_count', 'table_size', 'wal', 'wal_writes', 'archive_wal', 'checkpointer', 'stat_bgwriter_alloc', 'stat_bgwriter_checkpoint',
+         'stat_bgwriter_backend', 'stat_bgwriter_backend_fsync' , 'stat_bgwriter_bgwriter', 'stat_bgwriter_maxwritten',
+         'replication_slot', 'standby_delta', 'autovacuum']
 
 CHARTS = {
     'db_stat_transactions': {
@@ -155,8 +263,8 @@ CHARTS = {
         'lines': [
             ['connections', 'connections', 'absolute']
         ]},
-    'db_stat_tuple_read': {
-        'options': [None, 'Tuple reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_tuple_read', 'line'],
+    'db_stat_blks': {
+        'options': [None, 'Disk blocks reads from db', 'reads/s', 'db statistics', 'postgres.db_stat_blks', 'line'],
         'lines': [
             ['blks_read', 'disk', 'incremental'],
             ['blks_hit', 'cache', 'incremental']
@@ -176,6 +284,16 @@ CHARTS = {
             ['tup_deleted', 'deleted', 'incremental'],
             ['conflicts', 'conflicts', 'incremental']
         ]},
+    'db_stat_temp_bytes': {
+        'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', 'line'],
+        'lines': [
+            ['temp_bytes', 'size', 'incremental', 1, 1024]
+        ]},
+    'db_stat_temp_files': {
+        'options': [None, 'Temp files written to disk', 'files', 'db statistics', 'postgres.db_stat_temp_files', 'line'],
+        'lines': [
+            ['temp_files', 'files', 'incremental']
+        ]},
     'database_size': {
         'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'],
         'lines': [
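Each entry under 'lines' uses the python.d dimension format [id, name, algorithm, multiplier, divisor]; the divisor of 1024 on the byte-counter dimensions is what turns raw pg_stat counters into the KB/s units declared in the chart options. A rough sketch of the arithmetic, with made-up sample values:

# Sketch of how an 'incremental' dimension such as
#   ['temp_bytes', 'size', 'incremental', 1, 1024]
# is charted: netdata keeps the previously collected value and plots the
# per-interval delta scaled by multiplier/divisor (here: bytes -> KB).
previous, current = 4194304, 6291456        # hypothetical temp_bytes samples, 1s apart
multiplier, divisor = 1, 1024
print((current - previous) * multiplier / divisor)   # 2048.0 KB/s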
@@ -208,17 +326,82 @@ CHARTS = {
             ['table_size', 'size', 'absolute', 1, 1024 * 1024]
         ]},
     'wal': {
-        'options': [None, 'Write-Ahead Logging Statistics', 'files/s', 'write ahead log', 'postgres.wal', 'line'],
+        'options': [None, 'Write-Ahead Logs', 'files', 'wal', 'postgres.wal', 'line'],
+        'lines': [
+            ['written_wal', 'written', 'absolute'],
+            ['recycled_wal', 'recycled', 'absolute'],
+            ['total_wal', 'total', 'absolute']
+        ]},
+    'wal_writes': {
+        'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'],
+        'lines': [
+            ['wal_writes', 'writes', 'incremental', 1, 1024]
+        ]},
+    'archive_wal': {
+        'options': [None, 'Archive Write-Ahead Logs', 'files/s', 'archive wal', 'postgres.archive_wal', 'line'],
         'lines': [
             ['file_count', 'total', 'incremental'],
             ['ready_count', 'ready', 'incremental'],
             ['done_count', 'done', 'incremental']
         ]},
-    'background_writer': {
-        'options': [None, 'Checkpoints', 'writes/s', 'background writer', 'postgres.background_writer', 'line'],
+    'checkpointer': {
+        'options': [None, 'Checkpoints', 'writes', 'checkpointer', 'postgres.checkpointer', 'line'],
+        'lines': [
+            ['checkpoint_scheduled', 'scheduled', 'incremental'],
+            ['checkpoint_requested', 'requested', 'incremental']
+        ]},
+    'stat_bgwriter_alloc': {
+        'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
+        'lines': [
+            ['buffers_alloc', 'alloc', 'incremental', 1, 1024]
+        ]},
+    'stat_bgwriter_checkpoint': {
+        'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_checkpoint', 'line'],
+        'lines': [
+            ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
+        ]},
+    'stat_bgwriter_backend': {
+        'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_backend', 'line'],
+        'lines': [
+            ['buffers_backend', 'backend', 'incremental', 1, 1024]
+        ]},
+    'stat_bgwriter_backend_fsync': {
+        'options': [None, 'Fsync by backend', 'times', 'bgwriter', 'postgres.stat_bgwriter_backend_fsync', 'line'],
+        'lines': [
+            ['buffers_backend_fsync', 'backend fsync', 'incremental']
+        ]},
+    'stat_bgwriter_bgwriter': {
+        'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', 'postgres.bgwriter_bgwriter', 'line'],
+        'lines': [
+            ['buffers_clean', 'clean', 'incremental', 1, 1024]
+        ]},
+    'stat_bgwriter_maxwritten': {
+        'options': [None, 'Too many buffers written', 'times', 'bgwriter', 'postgres.stat_bgwriter_maxwritten', 'line'],
+        'lines': [
+            ['maxwritten_clean', 'maxwritten', 'incremental']
+        ]},
+    'autovacuum': {
+        'options': [None, 'Autovacuum workers', 'workers', 'autovacuum', 'postgres.autovacuum', 'line'],
         'lines': [
-            ['writer_scheduled', 'scheduled', 'incremental'],
-            ['writer_requested', 'requested', 'incremental']
+            ['analyze', 'analyze', 'absolute'],
+            ['vacuum', 'vacuum', 'absolute'],
+            ['vacuum_analyze', 'vacuum analyze', 'absolute'],
+            ['vacuum_freeze', 'vacuum freeze', 'absolute'],
+            ['brin_summarize', 'brin summarize', 'absolute']
+        ]},
+    'standby_delta': {
+        'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'],
+        'lines': [
+            ['sent_delta', 'sent delta', 'absolute', 1, 1024],
+            ['write_delta', 'write delta', 'absolute', 1, 1024],
+            ['flush_delta', 'flush delta', 'absolute', 1, 1024],
+            ['replay_delta', 'replay delta', 'absolute', 1, 1024]
+        ]},
+    'replication_slot': {
+        'options': [None, 'Replication slot files', 'files', 'replication slot', 'postgres.replication_slot', 'line'],
+        'lines': [
+            ['replslot_wal_keep', 'wal keeped', 'absolute'],
+            ['replslot_files', 'pg_replslot files', 'absolute']
         ]}
 }
 
@@ -237,6 +420,8 @@ class Service(SimpleService):
         self.data = dict()
         self.locks_zeroed = dict()
         self.databases = list()
+        self.secondaries = list()
+        self.replication_slots = list()
         self.queries = QUERY_STATS.copy()
 
     def _connect(self):
@@ -270,7 +455,10 @@
             cursor = self.connection.cursor()
             self.databases = discover_databases_(cursor, QUERIES['FIND_DATABASES'])
             is_superuser = check_if_superuser_(cursor, QUERIES['IF_SUPERUSER'])
+            self.secondaries = discover_secondaries_(cursor, QUERIES['FIND_STANDBY'])
             self.server_version = detect_server_version(cursor, QUERIES['DETECT_SERVER_VERSION'])
+            if self.server_version >= 94000:
+                self.replication_slots = discover_replication_slots_(cursor, QUERIES['FIND_REPLICATION_SLOT'])
             cursor.close()
 
             if self.database_poll and isinstance(self.database_poll, str):
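check() now also discovers standbys and, on 9.4+, replication slots. Note the two different constants for the same cutoff: this hunk gates slot discovery at 94000, while add_additional_queries_() in the next hunk uses 90400, which is the value that actually corresponds to 9.4 under the server_version_num encoding, as sketched below:

# Sketch: server_version_num packs the version as an integer,
# e.g. 9.4.12 -> 90412 and 10.3 -> 100003, so the 9.4 boundary is 90400
# (pg_replication_slots first shipped in 9.4).
def supports_replication_slots(server_version_num):
    return server_version_num >= 90400

assert supports_replication_slots(100003)       # 10.3
assert not supports_replication_slots(90312)    # 9.3.12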
@@ -286,29 +474,51 @@
             return False
 
     def add_additional_queries_(self, is_superuser):
+
+        if self.server_version >= 100000:
+            wal = 'wal'
+            lsn = 'lsn'
+        else:
+            wal = 'xlog'
+            lsn = 'location'
+        self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
+        self.queries[QUERIES['DIFF_LSN'].format(wal,lsn)] = METRICS['WAL_WRITES']
+        self.queries[QUERIES['STANDBY_DELTA'].format(wal,lsn)] = METRICS['STANDBY_DELTA']
+
         if self.index_stats:
             self.queries[QUERIES['INDEX_STATS']] = METRICS['INDEX_STATS']
         if self.table_stats:
             self.queries[QUERIES['TABLE_STATS']] = METRICS['TABLE_STATS']
         if is_superuser:
-            self.queries[QUERIES['BGWRITER']] = METRICS['BGWRITER']
+            self.queries[QUERIES['ARCHIVE'].format(wal)] = METRICS['ARCHIVE']
+            if self.server_version >= 90400:
+                self.queries[QUERIES['WAL'].format(wal,lsn)] = METRICS['WAL']
             if self.server_version >= 100000:
-                wal_dir_name = 'pg_wal'
-            else:
-                wal_dir_name = 'pg_xlog'
-            self.queries[QUERIES['ARCHIVE'].format(wal_dir_name)] = METRICS['ARCHIVE']
+                self.queries[QUERIES['REPSLOT_FILES']] = METRICS['REPSLOT_FILES']
+        if self.server_version >= 90400:
+            self.queries[QUERIES['AUTOVACUUM']] = METRICS['AUTOVACUUM']
 
     def create_dynamic_charts_(self):
 
         for database_name in self.databases[::-1]:
             self.definitions['database_size']['lines'].append([database_name + '_size',
                                                                database_name, 'absolute', 1, 1024 * 1024])
-            for chart_name in [name for name in CHARTS if name.startswith('db_stat')]:
+            for chart_name in [name for name in self.order if name.startswith('db_stat')]:
                 add_database_stat_chart_(order=self.order, definitions=self.definitions,
                                          name=chart_name, database_name=database_name)
 
             add_database_lock_chart_(order=self.order, definitions=self.definitions, database_name=database_name)
 
+        for application_name in self.secondaries[::-1]:
+            add_replication_delta_chart_(order=self.order, definitions=self.definitions,
+                                         name='standby_delta', application_name=application_name)
+
+        for slot_name in self.replication_slots[::-1]:
+            add_replication_slot_chart_(order=self.order, definitions=self.definitions,
+                                        name='replication_slot', slot_name=slot_name)
+
+
+
     def _get_data(self):
         result, error = self._connect()
         if result:
@@ -332,7 +542,14 @@ class Service(SimpleService):
                 cursor.execute(query, dict(databases=tuple(self.databases)))
                 for row in cursor:
                     for metric in metrics:
-                        dimension_id = '_'.join([row['database_name'], metric]) if 'database_name' in row else metric
+                        if 'database_name' in row:
+                            dimension_id = '_'.join([row['database_name'], metric])
+                        elif 'application_name' in row:
+                            dimension_id = '_'.join([row['application_name'], metric])
+                        elif 'slot_name' in row:
+                            dimension_id = '_'.join([row['slot_name'], metric])
+                        else:
+                            dimension_id = metric
                         if metric in row:
                             self.data[dimension_id] = int(row[metric])
                         elif 'locks_count' in row:
@@ -347,6 +564,21 @@ def discover_databases_(cursor, query):
             result.append(db)
     return result
 
+def discover_secondaries_(cursor, query):
+    cursor.execute(query)
+    result = list()
+    for sc in [standby[0] for standby in cursor]:
+        if sc not in result:
+            result.append(sc)
+    return result
+
+def discover_replication_slots_(cursor, query):
+    cursor.execute(query)
+    result = list()
+    for slot in [replication_slot[0] for replication_slot in cursor]:
+        if slot not in result:
+            result.append(slot)
+    return result
 
 def check_if_superuser_(cursor, query):
     cursor.execute(query)
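The widened dimension_id logic is what lets one query feed several dynamically created charts. A small sketch of the namespacing, using a hypothetical standby name:

# Sketch: the elif branches above prefix each metric with the row's
# application_name (or slot_name), keeping per-standby dimensions unique.
data = {}
row = {'application_name': 'replica1', 'sent_delta': 16384, 'write_delta': 8192}
for metric in ('sent_delta', 'write_delta'):
    dimension_id = '_'.join([row['application_name'], metric])
    data[dimension_id] = int(row[metric])
print(data)   # {'replica1_sent_delta': 16384, 'replica1_write_delta': 8192}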
@@ -398,3 +630,37 @@ def add_database_stat_chart_(order, definitions, name, database_name):
     definitions[chart_name] = {
         'options': [name, title + ': ' + database_name, units, 'db ' + database_name, context, chart_type],
         'lines': create_lines(database_name, chart_template['lines'])}
+
+def add_replication_delta_chart_(order, definitions, name, application_name):
+    def create_lines(standby, lines):
+        result = list()
+        for line in lines:
+            new_line = ['_'.join([standby, line[0]])] + line[1:]
+            result.append(new_line)
+        return result
+
+    chart_template = CHARTS[name]
+    chart_name = '_'.join([application_name, name])
+    position = order.index('database_size')
+    order.insert(position, chart_name)
+    name, title, units, family, context, chart_type = chart_template['options']
+    definitions[chart_name] = {
+        'options': [name, title + ': ' + application_name, units, 'replication delta', context, chart_type],
+        'lines': create_lines(application_name, chart_template['lines'])}
+
+def add_replication_slot_chart_(order, definitions, name, slot_name):
+    def create_lines(slot, lines):
+        result = list()
+        for line in lines:
+            new_line = ['_'.join([slot, line[0]])] + line[1:]
+            result.append(new_line)
+        return result
+
+    chart_template = CHARTS[name]
+    chart_name = '_'.join([slot_name, name])
+    position = order.index('database_size')
+    order.insert(position, chart_name)
+    name, title, units, family, context, chart_type = chart_template['options']
+    definitions[chart_name] = {
+        'options': [name, title + ': ' + slot_name, units, 'replication slot files', context, chart_type],
+        'lines': create_lines(slot_name, chart_template['lines'])}
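The two new helpers mirror add_database_stat_chart_(): each discovered standby or slot gets its own chart, spliced into the order list just before 'database_size', with dimension ids prefixed to match what _get_data() stores. A sketch of the effect for a hypothetical slot named 'standby1':

# Sketch: the chart is inserted before 'database_size' and create_lines()
# prefixes every dimension id with the slot name.
order = ['wal', 'database_size', 'autovacuum']
order.insert(order.index('database_size'), '_'.join(['standby1', 'replication_slot']))
print(order)   # ['wal', 'standby1_replication_slot', 'database_size', 'autovacuum']

lines = [['replslot_wal_keep', 'wal keeped', 'absolute'],
         ['replslot_files', 'pg_replslot files', 'absolute']]
prefixed = [['_'.join(['standby1', line[0]])] + line[1:] for line in lines]
print(prefixed[0])   # ['standby1_replslot_wal_keep', 'wal keeped', 'absolute']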