Diffstat
l---------  src/go/collectors/go.d.plugin/modules/postgres/README.md | 1
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/charts.go | 1400
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/collect.go | 266
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/collect_metrics.go | 367
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/config_schema.json | 141
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query.go | 78
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_bloat.go | 73
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_columns.go | 55
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_databases.go | 160
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_global.go | 285
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_indexes.go | 59
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_misc.go | 170
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_replication.go | 94
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/do_query_tables.go | 147
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/init.go | 24
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/integrations/postgresql.md | 382
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml | 750
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/metrics.go | 231
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/postgres.go | 171
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/postgres_test.go | 731
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/queries.go | 757
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/config.json | 14
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/config.yaml | 10
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/autovacuum_workers.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/bloat_tables.txt | 12
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/catalog_relations.txt | 6
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/checkpoints.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_conflicts.txt | 4
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_locks.txt | 7
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_size.txt | 4
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_stats.txt | 4
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-false.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-true.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/queryable_database_list.txt | 2
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_slot_files.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt | 5
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt | 5
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_connections_state.txt | 8
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_current_connections.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_version_num.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_connections.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_locks_held.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt | 11
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt | 6
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt | 6
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/table_columns_stats.txt | 10
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/txid_wraparound.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/uptime.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_archive_files.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_files.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_writes.txt | 3
-rw-r--r--  src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/xact_query_running_time.txt | 10
53 files changed, 6511 insertions, 0 deletions
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/README.md b/src/go/collectors/go.d.plugin/modules/postgres/README.md
new file mode 120000
index 000000000..73b67b984
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/README.md
@@ -0,0 +1 @@
+integrations/postgresql.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/charts.go b/src/go/collectors/go.d.plugin/modules/postgres/charts.go
new file mode 100644
index 000000000..8003ab9f8
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/charts.go
@@ -0,0 +1,1400 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/netdata/netdata/go/go.d.plugin/agent/module"
+)
+
+const (
+ prioConnectionsUtilization = module.Priority + iota
+ prioConnectionsUsage
+ prioConnectionsStateCount
+ prioDBConnectionsUtilization
+ prioDBConnectionsCount
+
+ prioTransactionsDuration
+ prioDBTransactionsRatio
+ prioDBTransactionsRate
+
+ prioQueriesDuration
+
+ prioDBOpsFetchedRowsRatio
+ prioDBOpsReadRowsRate
+ prioDBOpsWriteRowsRate
+ prioDBTempFilesCreatedRate
+ prioDBTempFilesIORate
+ prioTableOpsRowsRate
+ prioTableOpsRowsHOTRatio
+ prioTableOpsRowsHOTRate
+ prioTableScansRate
+ prioTableScansRowsRate
+
+ prioDBCacheIORatio
+ prioDBIORate
+ prioTableCacheIORatio
+ prioTableIORate
+ prioTableIndexCacheIORatio
+ prioTableIndexIORate
+ prioTableToastCacheIORatio
+ prioTableToastIORate
+ prioTableToastIndexCacheIORatio
+ prioTableToastIndexIORate
+
+ prioDBSize
+ prioTableTotalSize
+ prioIndexSize
+
+ prioTableBloatSizePerc
+ prioTableBloatSize
+ prioIndexBloatSizePerc
+ prioIndexBloatSize
+
+ prioLocksUtilization
+ prioDBLocksHeldCount
+ prioDBLocksAwaitedCount
+ prioDBDeadlocksRate
+
+ prioAutovacuumWorkersCount
+ prioTableAutovacuumSinceTime
+ prioTableVacuumSinceTime
+ prioTableAutoAnalyzeSinceTime
+ prioTableLastAnalyzeAgo
+
+ prioCheckpointsRate
+ prioCheckpointsTime
+ prioBGWriterHaltsRate
+ prioBuffersIORate
+ prioBuffersBackendFsyncRate
+ prioBuffersAllocRate
+ prioTXIDExhaustionTowardsAutovacuumPerc
+ prioTXIDExhaustionPerc
+ prioTXIDExhaustionOldestTXIDNum
+ prioTableRowsDeadRatio
+ prioTableRowsCount
+ prioTableNullColumns
+ prioIndexUsageStatus
+
+ prioReplicationAppWALLagSize
+ prioReplicationAppWALLagTime
+ prioReplicationSlotFilesCount
+ prioDBConflictsRate
+ prioDBConflictsReasonRate
+
+ prioWALIORate
+ prioWALFilesCount
+ prioWALArchivingFilesCount
+
+ prioDatabasesCount
+ prioCatalogRelationsCount
+ prioCatalogRelationsSize
+
+ prioUptime
+)
+
+var baseCharts = module.Charts{
+ serverConnectionsUtilizationChart.Copy(),
+ serverConnectionsUsageChart.Copy(),
+ serverConnectionsStateCount.Copy(),
+ locksUtilization.Copy(),
+ checkpointsChart.Copy(),
+ checkpointWriteChart.Copy(),
+ buffersIORateChart.Copy(),
+ buffersAllocRateChart.Copy(),
+ bgWriterHaltsRateChart.Copy(),
+ buffersBackendFsyncRateChart.Copy(),
+ walIORateChart.Copy(),
+ autovacuumWorkersCountChart.Copy(),
+ txidExhaustionTowardsAutovacuumPercChart.Copy(),
+ txidExhaustionPercChart.Copy(),
+ txidExhaustionOldestTXIDNumChart.Copy(),
+
+ catalogRelationsCountChart.Copy(),
+ catalogRelationsSizeChart.Copy(),
+ serverUptimeChart.Copy(),
+ databasesCountChart.Copy(),
+}
+
+var walFilesCharts = module.Charts{
+ walFilesCountChart.Copy(),
+ walArchivingFilesCountChart.Copy(),
+}
+
+func (p *Postgres) addWALFilesCharts() {
+ charts := walFilesCharts.Copy()
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+var (
+ serverConnectionsUtilizationChart = module.Chart{
+ ID: "connections_utilization",
+ Title: "Connections utilization",
+ Units: "percentage",
+ Fam: "connections",
+ Ctx: "postgres.connections_utilization",
+ Priority: prioConnectionsUtilization,
+ Dims: module.Dims{
+ {ID: "server_connections_utilization", Name: "used"},
+ },
+ }
+ serverConnectionsUsageChart = module.Chart{
+ ID: "connections_usage",
+ Title: "Connections usage",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "postgres.connections_usage",
+ Priority: prioConnectionsUsage,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "server_connections_available", Name: "available"},
+ {ID: "server_connections_used", Name: "used"},
+ },
+ }
+ serverConnectionsStateCount = module.Chart{
+ ID: "connections_state",
+ Title: "Connections in each state",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "postgres.connections_state_count",
+ Priority: prioConnectionsStateCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "server_connections_state_active", Name: "active"},
+ {ID: "server_connections_state_idle", Name: "idle"},
+ {ID: "server_connections_state_idle_in_transaction", Name: "idle_in_transaction"},
+ {ID: "server_connections_state_idle_in_transaction_aborted", Name: "idle_in_transaction_aborted"},
+ {ID: "server_connections_state_fastpath_function_call", Name: "fastpath_function_call"},
+ {ID: "server_connections_state_disabled", Name: "disabled"},
+ },
+ }
+
+ locksUtilization = module.Chart{
+ ID: "locks_utilization",
+ Title: "Acquired locks utilization",
+ Units: "percentage",
+ Fam: "locks",
+ Ctx: "postgres.locks_utilization",
+ Priority: prioLocksUtilization,
+ Dims: module.Dims{
+ {ID: "locks_utilization", Name: "used"},
+ },
+ }
+
+ checkpointsChart = module.Chart{
+ ID: "checkpoints_rate",
+ Title: "Checkpoints",
+ Units: "checkpoints/s",
+ Fam: "maintenance",
+ Ctx: "postgres.checkpoints_rate",
+ Priority: prioCheckpointsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "checkpoints_timed", Name: "scheduled", Algo: module.Incremental},
+ {ID: "checkpoints_req", Name: "requested", Algo: module.Incremental},
+ },
+ }
+ // TODO: should be seconds, also it is units/s when using incremental...
+ checkpointWriteChart = module.Chart{
+ ID: "checkpoints_time",
+ Title: "Checkpoint time",
+ Units: "milliseconds",
+ Fam: "maintenance",
+ Ctx: "postgres.checkpoints_time",
+ Priority: prioCheckpointsTime,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "checkpoint_write_time", Name: "write", Algo: module.Incremental},
+ {ID: "checkpoint_sync_time", Name: "sync", Algo: module.Incremental},
+ },
+ }
+ bgWriterHaltsRateChart = module.Chart{
+ ID: "bgwriter_halts_rate",
+ Title: "Background writer scan halts",
+ Units: "halts/s",
+ Fam: "maintenance",
+ Ctx: "postgres.bgwriter_halts_rate",
+ Priority: prioBGWriterHaltsRate,
+ Dims: module.Dims{
+ {ID: "maxwritten_clean", Name: "maxwritten", Algo: module.Incremental},
+ },
+ }
+
+ buffersIORateChart = module.Chart{
+ ID: "buffers_io_rate",
+ Title: "Buffers written rate",
+ Units: "B/s",
+ Fam: "maintenance",
+ Ctx: "postgres.buffers_io_rate",
+ Priority: prioBuffersIORate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "buffers_checkpoint", Name: "checkpoint", Algo: module.Incremental},
+ {ID: "buffers_backend", Name: "backend", Algo: module.Incremental},
+ {ID: "buffers_clean", Name: "bgwriter", Algo: module.Incremental},
+ },
+ }
+ buffersBackendFsyncRateChart = module.Chart{
+ ID: "buffers_backend_fsync_rate",
+ Title: "Backend fsync calls",
+ Units: "calls/s",
+ Fam: "maintenance",
+ Ctx: "postgres.buffers_backend_fsync_rate",
+ Priority: prioBuffersBackendFsyncRate,
+ Dims: module.Dims{
+ {ID: "buffers_backend_fsync", Name: "fsync", Algo: module.Incremental},
+ },
+ }
+ buffersAllocRateChart = module.Chart{
+ ID: "buffers_alloc_rate",
+ Title: "Buffers allocated",
+ Units: "B/s",
+ Fam: "maintenance",
+ Ctx: "postgres.buffers_allocated_rate",
+ Priority: prioBuffersAllocRate,
+ Dims: module.Dims{
+ {ID: "buffers_alloc", Name: "allocated", Algo: module.Incremental},
+ },
+ }
+
+ walIORateChart = module.Chart{
+ ID: "wal_io_rate",
+ Title: "Write-Ahead Log writes",
+ Units: "B/s",
+ Fam: "wal",
+ Ctx: "postgres.wal_io_rate",
+ Priority: prioWALIORate,
+ Dims: module.Dims{
+ {ID: "wal_writes", Name: "written", Algo: module.Incremental},
+ },
+ }
+ walFilesCountChart = module.Chart{
+ ID: "wal_files_count",
+ Title: "Write-Ahead Log files",
+ Units: "files",
+ Fam: "wal",
+ Ctx: "postgres.wal_files_count",
+ Priority: prioWALFilesCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "wal_written_files", Name: "written"},
+ {ID: "wal_recycled_files", Name: "recycled"},
+ },
+ }
+
+ walArchivingFilesCountChart = module.Chart{
+ ID: "wal_archiving_files_count",
+ Title: "Write-Ahead Log archived files",
+ Units: "files/s",
+ Fam: "wal",
+ Ctx: "postgres.wal_archiving_files_count",
+ Priority: prioWALArchivingFilesCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "wal_archive_files_ready_count", Name: "ready"},
+ {ID: "wal_archive_files_done_count", Name: "done"},
+ },
+ }
+
+ autovacuumWorkersCountChart = module.Chart{
+ ID: "autovacuum_workers_count",
+ Title: "Autovacuum workers",
+ Units: "workers",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.autovacuum_workers_count",
+ Priority: prioAutovacuumWorkersCount,
+ Dims: module.Dims{
+ {ID: "autovacuum_analyze", Name: "analyze"},
+ {ID: "autovacuum_vacuum_analyze", Name: "vacuum_analyze"},
+ {ID: "autovacuum_vacuum", Name: "vacuum"},
+ {ID: "autovacuum_vacuum_freeze", Name: "vacuum_freeze"},
+ {ID: "autovacuum_brin_summarize", Name: "brin_summarize"},
+ },
+ }
+
+ txidExhaustionTowardsAutovacuumPercChart = module.Chart{
+ ID: "txid_exhaustion_towards_autovacuum_perc",
+ Title: "Percent towards emergency autovacuum",
+ Units: "percentage",
+ Fam: "maintenance",
+ Ctx: "postgres.txid_exhaustion_towards_autovacuum_perc",
+ Priority: prioTXIDExhaustionTowardsAutovacuumPerc,
+ Dims: module.Dims{
+ {ID: "percent_towards_emergency_autovacuum", Name: "emergency_autovacuum"},
+ },
+ }
+ txidExhaustionPercChart = module.Chart{
+ ID: "txid_exhaustion_perc",
+ Title: "Percent towards transaction ID wraparound",
+ Units: "percentage",
+ Fam: "maintenance",
+ Ctx: "postgres.txid_exhaustion_perc",
+ Priority: prioTXIDExhaustionPerc,
+ Dims: module.Dims{
+ {ID: "percent_towards_wraparound", Name: "txid_exhaustion"},
+ },
+ }
+ txidExhaustionOldestTXIDNumChart = module.Chart{
+ ID: "txid_exhaustion_oldest_txid_num",
+ Title: "Oldest transaction XID",
+ Units: "xid",
+ Fam: "maintenance",
+ Ctx: "postgres.txid_exhaustion_oldest_txid_num",
+ Priority: prioTXIDExhaustionOldestTXIDNum,
+ Dims: module.Dims{
+ {ID: "oldest_current_xid", Name: "xid"},
+ },
+ }
+
+ catalogRelationsCountChart = module.Chart{
+ ID: "catalog_relations_count",
+ Title: "Relation count",
+ Units: "relations",
+ Fam: "catalog",
+ Ctx: "postgres.catalog_relations_count",
+ Priority: prioCatalogRelationsCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "catalog_relkind_r_count", Name: "ordinary_table"},
+ {ID: "catalog_relkind_i_count", Name: "index"},
+ {ID: "catalog_relkind_S_count", Name: "sequence"},
+ {ID: "catalog_relkind_t_count", Name: "toast_table"},
+ {ID: "catalog_relkind_v_count", Name: "view"},
+ {ID: "catalog_relkind_m_count", Name: "materialized_view"},
+ {ID: "catalog_relkind_c_count", Name: "composite_type"},
+ {ID: "catalog_relkind_f_count", Name: "foreign_table"},
+ {ID: "catalog_relkind_p_count", Name: "partitioned_table"},
+ {ID: "catalog_relkind_I_count", Name: "partitioned_index"},
+ },
+ }
+ catalogRelationsSizeChart = module.Chart{
+ ID: "catalog_relations_size",
+ Title: "Relation size",
+ Units: "B",
+ Fam: "catalog",
+ Ctx: "postgres.catalog_relations_size",
+ Priority: prioCatalogRelationsSize,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "catalog_relkind_r_size", Name: "ordinary_table"},
+ {ID: "catalog_relkind_i_size", Name: "index"},
+ {ID: "catalog_relkind_S_size", Name: "sequence"},
+ {ID: "catalog_relkind_t_size", Name: "toast_table"},
+ {ID: "catalog_relkind_v_size", Name: "view"},
+ {ID: "catalog_relkind_m_size", Name: "materialized_view"},
+ {ID: "catalog_relkind_c_size", Name: "composite_type"},
+ {ID: "catalog_relkind_f_size", Name: "foreign_table"},
+ {ID: "catalog_relkind_p_size", Name: "partitioned_table"},
+ {ID: "catalog_relkind_I_size", Name: "partitioned_index"},
+ },
+ }
+
+ serverUptimeChart = module.Chart{
+ ID: "server_uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "postgres.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "server_uptime", Name: "uptime"},
+ },
+ }
+
+ databasesCountChart = module.Chart{
+ ID: "databases_count",
+ Title: "Number of databases",
+ Units: "databases",
+ Fam: "catalog",
+ Ctx: "postgres.databases_count",
+ Priority: prioDatabasesCount,
+ Dims: module.Dims{
+ {ID: "databases_count", Name: "databases"},
+ },
+ }
+
+ transactionsDurationChartTmpl = module.Chart{
+ ID: "transactions_duration",
+ Title: "Observed transactions time",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "postgres.transactions_duration",
+ Priority: prioTransactionsDuration,
+ Type: module.Stacked,
+ }
+ queriesDurationChartTmpl = module.Chart{
+ ID: "queries_duration",
+ Title: "Observed active queries time",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "postgres.queries_duration",
+ Priority: prioQueriesDuration,
+ Type: module.Stacked,
+ }
+)
+
+func newRunningTimeHistogramChart(tmpl module.Chart, prefix string, buckets []float64) (*module.Chart, error) {
+ chart := tmpl.Copy()
+
+ for i, v := range buckets {
+ dim := &module.Dim{
+ ID: fmt.Sprintf("%s_hist_bucket_%d", prefix, i+1),
+ Name: time.Duration(v * float64(time.Second)).String(),
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+ }
+
+ dim := &module.Dim{
+ ID: fmt.Sprintf("%s_hist_bucket_inf", prefix),
+ Name: "+Inf",
+ Algo: module.Incremental,
+ }
+ if err := chart.AddDim(dim); err != nil {
+ return nil, err
+ }
+
+ return chart, nil
+}
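
For reference, a minimal standalone sketch of the dim naming this helper produces. The bucket values below are illustrative, not the module's defaults; the names come from time.Duration.String(), so a 0.5s bucket is labeled "500ms".

package main

import (
    "fmt"
    "time"
)

func main() {
    // Illustrative buckets in seconds; the real values come from the
    // collector's XactTimeHistogram/QueryTimeHistogram settings.
    buckets := []float64{0.1, 0.5, 1, 5}
    for i, v := range buckets {
        // Mirrors the dim IDs/names built in newRunningTimeHistogramChart.
        id := fmt.Sprintf("query_running_time_hist_bucket_%d", i+1)
        name := time.Duration(v * float64(time.Second)).String()
        fmt.Println(id, "->", name) // e.g. query_running_time_hist_bucket_1 -> 100ms
    }
    fmt.Println("query_running_time_hist_bucket_inf -> +Inf")
}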
+
+func (p *Postgres) addTransactionsRunTimeHistogramChart() {
+ chart, err := newRunningTimeHistogramChart(
+ transactionsDurationChartTmpl,
+ "transaction_running_time",
+ p.XactTimeHistogram,
+ )
+ if err != nil {
+ p.Warning(err)
+ return
+ }
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addQueriesRunTimeHistogramChart() {
+ chart, err := newRunningTimeHistogramChart(
+ queriesDurationChartTmpl,
+ "query_running_time",
+ p.QueryTimeHistogram,
+ )
+ if err != nil {
+ p.Warning(err)
+ return
+ }
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+var (
+ replicationStandbyAppCharts = module.Charts{
+ replicationAppWALLagSizeChartTmpl.Copy(),
+ replicationAppWALLagTimeChartTmpl.Copy(),
+ }
+ replicationAppWALLagSizeChartTmpl = module.Chart{
+ ID: "replication_app_%s_wal_lag_size",
+ Title: "Standby application WAL lag size",
+ Units: "B",
+ Fam: "replication",
+ Ctx: "postgres.replication_app_wal_lag_size",
+ Priority: prioReplicationAppWALLagSize,
+ Dims: module.Dims{
+ {ID: "repl_standby_app_%s_wal_sent_lag_size", Name: "sent_lag"},
+ {ID: "repl_standby_app_%s_wal_write_lag_size", Name: "write_lag"},
+ {ID: "repl_standby_app_%s_wal_flush_lag_size", Name: "flush_lag"},
+ {ID: "repl_standby_app_%s_wal_replay_lag_size", Name: "replay_lag"},
+ },
+ }
+ replicationAppWALLagTimeChartTmpl = module.Chart{
+ ID: "replication_app_%s_wal_lag_time",
+ Title: "Standby application WAL lag time",
+ Units: "seconds",
+ Fam: "replication",
+ Ctx: "postgres.replication_app_wal_lag_time",
+ Priority: prioReplicationAppWALLagTime,
+ Dims: module.Dims{
+ {ID: "repl_standby_app_%s_wal_write_lag_time", Name: "write_lag"},
+ {ID: "repl_standby_app_%s_wal_flush_lag_time", Name: "flush_lag"},
+ {ID: "repl_standby_app_%s_wal_replay_lag_time", Name: "replay_lag"},
+ },
+ }
+)
+
+func newReplicationStandbyAppCharts(app string) *module.Charts {
+ charts := replicationStandbyAppCharts.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, app)
+ c.Labels = []module.Label{
+ {Key: "application", Value: app},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, app)
+ }
+ }
+ return charts
+}
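
The same copy-then-expand pattern recurs below for slots, databases, tables, and indexes. A self-contained sketch of why it works, using local stand-ins for module.Chart/module.Dim (assumptions, not the real types) and a hypothetical application name:

package main

import "fmt"

// Minimal stand-ins for module.Chart and module.Dim.
type dim struct{ ID string }
type chart struct {
    ID   string
    Dims []*dim
}

func main() {
    app := "standby1" // hypothetical application_name from pg_stat_replication
    c := chart{
        ID:   "replication_app_%s_wal_lag_size",
        Dims: []*dim{{ID: "repl_standby_app_%s_wal_sent_lag_size"}},
    }
    c.ID = fmt.Sprintf(c.ID, app)
    for _, d := range c.Dims {
        d.ID = fmt.Sprintf(d.ID, app) // Dims hold pointers, so this mutates in place
    }
    fmt.Println(c.ID)         // replication_app_standby1_wal_lag_size
    fmt.Println(c.Dims[0].ID) // repl_standby_app_standby1_wal_sent_lag_size
}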
+
+func (p *Postgres) addNewReplicationStandbyAppCharts(app string) {
+ charts := newReplicationStandbyAppCharts(app)
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeReplicationStandbyAppCharts(app string) {
+ prefix := fmt.Sprintf("replication_standby_app_%s_", app)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ replicationSlotCharts = module.Charts{
+ replicationSlotFilesCountChartTmpl.Copy(),
+ }
+ replicationSlotFilesCountChartTmpl = module.Chart{
+ ID: "replication_slot_%s_files_count",
+ Title: "Replication slot files",
+ Units: "files",
+ Fam: "replication",
+ Ctx: "postgres.replication_slot_files_count",
+ Priority: prioReplicationSlotFilesCount,
+ Dims: module.Dims{
+ {ID: "repl_slot_%s_replslot_wal_keep", Name: "wal_keep"},
+ {ID: "repl_slot_%s_replslot_files", Name: "pg_replslot_files"},
+ },
+ }
+)
+
+func newReplicationSlotCharts(slot string) *module.Charts {
+ charts := replicationSlotCharts.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, slot)
+ c.Labels = []module.Label{
+ {Key: "slot", Value: slot},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, slot)
+ }
+ }
+ return charts
+}
+
+func (p *Postgres) addNewReplicationSlotCharts(slot string) {
+ charts := newReplicationSlotCharts(slot)
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeReplicationSlotCharts(slot string) {
+ prefix := fmt.Sprintf("replication_slot_%s_", slot)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ dbChartsTmpl = module.Charts{
+ dbTransactionsRatioChartTmpl.Copy(),
+ dbTransactionsRateChartTmpl.Copy(),
+ dbConnectionsUtilizationChartTmpl.Copy(),
+ dbConnectionsCountChartTmpl.Copy(),
+ dbCacheIORatioChartTmpl.Copy(),
+ dbIORateChartTmpl.Copy(),
+ dbOpsFetchedRowsRatioChartTmpl.Copy(),
+ dbOpsReadRowsRateChartTmpl.Copy(),
+ dbOpsWriteRowsRateChartTmpl.Copy(),
+ dbDeadlocksRateChartTmpl.Copy(),
+ dbLocksHeldCountChartTmpl.Copy(),
+ dbLocksAwaitedCountChartTmpl.Copy(),
+ dbTempFilesCreatedRateChartTmpl.Copy(),
+ dbTempFilesIORateChartTmpl.Copy(),
+ dbSizeChartTmpl.Copy(),
+ }
+ dbTransactionsRatioChartTmpl = module.Chart{
+ ID: "db_%s_transactions_ratio",
+ Title: "Database transactions ratio",
+ Units: "percentage",
+ Fam: "transactions",
+ Ctx: "postgres.db_transactions_ratio",
+ Priority: prioDBTransactionsRatio,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_xact_commit", Name: "committed", Algo: module.PercentOfIncremental},
+ {ID: "db_%s_xact_rollback", Name: "rollback", Algo: module.PercentOfIncremental},
+ },
+ }
+ dbTransactionsRateChartTmpl = module.Chart{
+ ID: "db_%s_transactions_rate",
+ Title: "Database transactions",
+ Units: "transactions/s",
+ Fam: "transactions",
+ Ctx: "postgres.db_transactions_rate",
+ Priority: prioDBTransactionsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_xact_commit", Name: "committed", Algo: module.Incremental},
+ {ID: "db_%s_xact_rollback", Name: "rollback", Algo: module.Incremental},
+ },
+ }
+ dbConnectionsUtilizationChartTmpl = module.Chart{
+ ID: "db_%s_connections_utilization",
+ Title: "Database connections utilization",
+ Units: "percentage",
+ Fam: "connections",
+ Ctx: "postgres.db_connections_utilization",
+ Priority: prioDBConnectionsUtilization,
+ Dims: module.Dims{
+ {ID: "db_%s_numbackends_utilization", Name: "used"},
+ },
+ }
+ dbConnectionsCountChartTmpl = module.Chart{
+ ID: "db_%s_connections",
+ Title: "Database connections",
+ Units: "connections",
+ Fam: "connections",
+ Ctx: "postgres.db_connections_count",
+ Priority: prioDBConnectionsCount,
+ Dims: module.Dims{
+ {ID: "db_%s_numbackends", Name: "connections"},
+ },
+ }
+ dbCacheIORatioChartTmpl = module.Chart{
+ ID: "db_%s_cache_io_ratio",
+ Title: "Database buffer cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.db_cache_io_ratio",
+ Priority: prioDBCacheIORatio,
+ Dims: module.Dims{
+ {ID: "db_%s_blks_read_perc", Name: "miss"},
+ },
+ }
+ dbIORateChartTmpl = module.Chart{
+ ID: "db_%s_io_rate",
+ Title: "Database reads",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.db_io_rate",
+ Priority: prioDBIORate,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "db_%s_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "db_%s_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ dbOpsFetchedRowsRatioChartTmpl = module.Chart{
+ ID: "db_%s_db_ops_fetched_rows_ratio",
+ Title: "Database rows fetched ratio",
+ Units: "percentage",
+ Fam: "throughput",
+ Ctx: "postgres.db_ops_fetched_rows_ratio",
+ Priority: prioDBOpsFetchedRowsRatio,
+ Dims: module.Dims{
+ {ID: "db_%s_tup_fetched_perc", Name: "fetched"},
+ },
+ }
+ dbOpsReadRowsRateChartTmpl = module.Chart{
+ ID: "db_%s_ops_read_rows_rate",
+ Title: "Database rows read",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_ops_read_rows_rate",
+ Priority: prioDBOpsReadRowsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_tup_returned", Name: "returned", Algo: module.Incremental},
+ {ID: "db_%s_tup_fetched", Name: "fetched", Algo: module.Incremental},
+ },
+ }
+ dbOpsWriteRowsRateChartTmpl = module.Chart{
+ ID: "db_%s_ops_write_rows_rate",
+ Title: "Database rows written",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_ops_write_rows_rate",
+ Priority: prioDBOpsWriteRowsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_tup_inserted", Name: "inserted", Algo: module.Incremental},
+ {ID: "db_%s_tup_deleted", Name: "deleted", Algo: module.Incremental},
+ {ID: "db_%s_tup_updated", Name: "updated", Algo: module.Incremental},
+ },
+ }
+ dbConflictsRateChartTmpl = module.Chart{
+ ID: "db_%s_conflicts_rate",
+ Title: "Database canceled queries",
+ Units: "queries/s",
+ Fam: "replication",
+ Ctx: "postgres.db_conflicts_rate",
+ Priority: prioDBConflictsRate,
+ Dims: module.Dims{
+ {ID: "db_%s_conflicts", Name: "conflicts", Algo: module.Incremental},
+ },
+ }
+ dbConflictsReasonRateChartTmpl = module.Chart{
+ ID: "db_%s_conflicts_reason_rate",
+ Title: "Database canceled queries by reason",
+ Units: "queries/s",
+ Fam: "replication",
+ Ctx: "postgres.db_conflicts_reason_rate",
+ Priority: prioDBConflictsReasonRate,
+ Dims: module.Dims{
+ {ID: "db_%s_confl_tablespace", Name: "tablespace", Algo: module.Incremental},
+ {ID: "db_%s_confl_lock", Name: "lock", Algo: module.Incremental},
+ {ID: "db_%s_confl_snapshot", Name: "snapshot", Algo: module.Incremental},
+ {ID: "db_%s_confl_bufferpin", Name: "bufferpin", Algo: module.Incremental},
+ {ID: "db_%s_confl_deadlock", Name: "deadlock", Algo: module.Incremental},
+ },
+ }
+ dbDeadlocksRateChartTmpl = module.Chart{
+ ID: "db_%s_deadlocks_rate",
+ Title: "Database deadlocks",
+ Units: "deadlocks/s",
+ Fam: "locks",
+ Ctx: "postgres.db_deadlocks_rate",
+ Priority: prioDBDeadlocksRate,
+ Dims: module.Dims{
+ {ID: "db_%s_deadlocks", Name: "deadlocks", Algo: module.Incremental},
+ },
+ }
+ dbLocksHeldCountChartTmpl = module.Chart{
+ ID: "db_%s_locks_held",
+ Title: "Database locks held",
+ Units: "locks",
+ Fam: "locks",
+ Ctx: "postgres.db_locks_held_count",
+ Priority: prioDBLocksHeldCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_lock_mode_AccessShareLock_held", Name: "access_share"},
+ {ID: "db_%s_lock_mode_RowShareLock_held", Name: "row_share"},
+ {ID: "db_%s_lock_mode_RowExclusiveLock_held", Name: "row_exclusive"},
+ {ID: "db_%s_lock_mode_ShareUpdateExclusiveLock_held", Name: "share_update"},
+ {ID: "db_%s_lock_mode_ShareLock_held", Name: "share"},
+ {ID: "db_%s_lock_mode_ShareRowExclusiveLock_held", Name: "share_row_exclusive"},
+ {ID: "db_%s_lock_mode_ExclusiveLock_held", Name: "exclusive"},
+ {ID: "db_%s_lock_mode_AccessExclusiveLock_held", Name: "access_exclusive"},
+ },
+ }
+ dbLocksAwaitedCountChartTmpl = module.Chart{
+ ID: "db_%s_locks_awaited_count",
+ Title: "Database locks awaited",
+ Units: "locks",
+ Fam: "locks",
+ Ctx: "postgres.db_locks_awaited_count",
+ Priority: prioDBLocksAwaitedCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "db_%s_lock_mode_AccessShareLock_awaited", Name: "access_share"},
+ {ID: "db_%s_lock_mode_RowShareLock_awaited", Name: "row_share"},
+ {ID: "db_%s_lock_mode_RowExclusiveLock_awaited", Name: "row_exclusive"},
+ {ID: "db_%s_lock_mode_ShareUpdateExclusiveLock_awaited", Name: "share_update"},
+ {ID: "db_%s_lock_mode_ShareLock_awaited", Name: "share"},
+ {ID: "db_%s_lock_mode_ShareRowExclusiveLock_awaited", Name: "share_row_exclusive"},
+ {ID: "db_%s_lock_mode_ExclusiveLock_awaited", Name: "exclusive"},
+ {ID: "db_%s_lock_mode_AccessExclusiveLock_awaited", Name: "access_exclusive"},
+ },
+ }
+ dbTempFilesCreatedRateChartTmpl = module.Chart{
+ ID: "db_%s_temp_files_files_created_rate",
+ Title: "Database created temporary files",
+ Units: "files/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_temp_files_created_rate",
+ Priority: prioDBTempFilesCreatedRate,
+ Dims: module.Dims{
+ {ID: "db_%s_temp_files", Name: "created", Algo: module.Incremental},
+ },
+ }
+ dbTempFilesIORateChartTmpl = module.Chart{
+ ID: "db_%s_temp_files_io_rate",
+ Title: "Database temporary files data written to disk",
+ Units: "B/s",
+ Fam: "throughput",
+ Ctx: "postgres.db_temp_files_io_rate",
+ Priority: prioDBTempFilesIORate,
+ Dims: module.Dims{
+ {ID: "db_%s_temp_bytes", Name: "written", Algo: module.Incremental},
+ },
+ }
+ dbSizeChartTmpl = module.Chart{
+ ID: "db_%s_size",
+ Title: "Database size",
+ Units: "B",
+ Fam: "size",
+ Ctx: "postgres.db_size",
+ Priority: prioDBSize,
+ Dims: module.Dims{
+ {ID: "db_%s_size", Name: "size"},
+ },
+ }
+)
+
+func (p *Postgres) addDBConflictsCharts(db *dbMetrics) {
+ tmpl := module.Charts{
+ dbConflictsRateChartTmpl.Copy(),
+ dbConflictsReasonRateChartTmpl.Copy(),
+ }
+ charts := newDatabaseCharts(tmpl.Copy(), db)
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func newDatabaseCharts(tmpl *module.Charts, db *dbMetrics) *module.Charts {
+ charts := tmpl.Copy()
+ for _, c := range *charts {
+ c.ID = fmt.Sprintf(c.ID, db.name)
+ c.Labels = []module.Label{
+ {Key: "database", Value: db.name},
+ }
+ for _, d := range c.Dims {
+ d.ID = fmt.Sprintf(d.ID, db.name)
+ }
+ }
+ return charts
+}
+
+func (p *Postgres) addNewDatabaseCharts(db *dbMetrics) {
+ charts := newDatabaseCharts(dbChartsTmpl.Copy(), db)
+
+ if db.size == nil {
+ _ = charts.Remove(fmt.Sprintf(dbSizeChartTmpl.ID, db.name))
+ }
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeDatabaseCharts(db *dbMetrics) {
+ prefix := fmt.Sprintf("db_%s_", db.name)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ tableChartsTmpl = module.Charts{
+ tableRowsCountChartTmpl.Copy(),
+ tableRowsDeadRatioChartTmpl.Copy(),
+ tableOpsRowsRateChartTmpl.Copy(),
+ tableOpsRowsHOTRatioChartTmpl.Copy(),
+ tableOpsRowsHOTRateChartTmpl.Copy(),
+ tableScansRateChartTmpl.Copy(),
+ tableScansRowsRateChartTmpl.Copy(),
+ tableNullColumnsCountChartTmpl.Copy(),
+ tableTotalSizeChartTmpl.Copy(),
+ tableBloatSizePercChartTmpl.Copy(),
+ tableBloatSizeChartTmpl.Copy(),
+ }
+
+ tableRowsDeadRatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_rows_dead_ratio",
+ Title: "Table dead rows",
+ Units: "percentage",
+ Fam: "maintenance",
+ Ctx: "postgres.table_rows_dead_ratio",
+ Priority: prioTableRowsDeadRatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_dead_tup_perc", Name: "dead"},
+ },
+ }
+ tableRowsCountChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_rows_count",
+ Title: "Table total rows",
+ Units: "rows",
+ Fam: "maintenance",
+ Ctx: "postgres.table_rows_count",
+ Priority: prioTableRowsCount,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_live_tup", Name: "live"},
+ {ID: "table_%s_db_%s_schema_%s_n_dead_tup", Name: "dead"},
+ },
+ }
+ tableOpsRowsRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_ops_rows_rate",
+ Title: "Table throughput",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_ops_rows_rate",
+ Priority: prioTableOpsRowsRate,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_tup_ins", Name: "inserted", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_n_tup_del", Name: "deleted", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_n_tup_upd", Name: "updated", Algo: module.Incremental},
+ },
+ }
+ tableOpsRowsHOTRatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_ops_rows_hot_ratio",
+ Title: "Table HOT updates ratio",
+ Units: "percentage",
+ Fam: "throughput",
+ Ctx: "postgres.table_ops_rows_hot_ratio",
+ Priority: prioTableOpsRowsHOTRatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_tup_hot_upd_perc", Name: "hot"},
+ },
+ }
+ tableOpsRowsHOTRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_ops_rows_hot_rate",
+ Title: "Table HOT updates",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_ops_rows_hot_rate",
+ Priority: prioTableOpsRowsHOTRate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_n_tup_hot_upd", Name: "hot", Algo: module.Incremental},
+ },
+ }
+ tableCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_cache_io_ratio",
+ Title: "Table I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_cache_io_ratio",
+ Priority: prioTableCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_heap_blks_read_perc", Name: "miss"},
+ },
+ }
+ tableIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_io_rate",
+ Title: "Table I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_io_rate",
+ Priority: prioTableIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_heap_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_heap_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableIndexCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_index_cache_io_ratio",
+ Title: "Table index I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_index_cache_io_ratio",
+ Priority: prioTableIndexCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_blks_read_perc", Name: "miss", Algo: module.Incremental},
+ },
+ }
+ tableIndexIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_index_io_rate",
+ Title: "Table index I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_index_io_rate",
+ Priority: prioTableIndexIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_idx_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableTOASTCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_cache_io_ratio",
+ Title: "Table TOAST I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_cache_io_ratio",
+ Priority: prioTableToastCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_toast_blks_read_perc", Name: "miss", Algo: module.Incremental},
+ },
+ }
+ tableTOASTIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_io_rate",
+ Title: "Table TOAST I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_io_rate",
+ Priority: prioTableToastIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_toast_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_toast_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableTOASTIndexCacheIORatioChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_index_cache_io_ratio",
+ Title: "Table TOAST index I/O cache miss ratio",
+ Units: "percentage",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_index_cache_io_ratio",
+ Priority: prioTableToastIndexCacheIORatio,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_tidx_blks_read_perc", Name: "miss", Algo: module.Incremental},
+ },
+ }
+ tableTOASTIndexIORateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_toast_index_io_rate",
+ Title: "Table TOAST index I/O",
+ Units: "B/s",
+ Fam: "cache",
+ Ctx: "postgres.table_toast_index_io_rate",
+ Priority: prioTableToastIndexIORate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_tidx_blks_hit", Name: "memory", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_tidx_blks_read", Name: "disk", Algo: module.Incremental},
+ },
+ }
+ tableScansRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_scans_rate",
+ Title: "Table scans",
+ Units: "scans/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_scans_rate",
+ Priority: prioTableScansRate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_scan", Name: "index", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_seq_scan", Name: "sequential", Algo: module.Incremental},
+ },
+ }
+ tableScansRowsRateChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_scans_rows_rate",
+ Title: "Table live rows fetched by scans",
+ Units: "rows/s",
+ Fam: "throughput",
+ Ctx: "postgres.table_scans_rows_rate",
+ Priority: prioTableScansRowsRate,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_idx_tup_fetch", Name: "index", Algo: module.Incremental},
+ {ID: "table_%s_db_%s_schema_%s_seq_tup_read", Name: "sequential", Algo: module.Incremental},
+ },
+ }
+ tableAutoVacuumSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_autovacuum_since_time",
+ Title: "Table time since last auto VACUUM",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_autovacuum_since_time",
+ Priority: prioTableAutovacuumSinceTime,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_autovacuum_ago", Name: "time"},
+ },
+ }
+ tableVacuumSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_vacuum_since_time",
+ Title: "Table time since last manual VACUUM",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_vacuum_since_time",
+ Priority: prioTableVacuumSinceTime,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_vacuum_ago", Name: "time"},
+ },
+ }
+ tableAutoAnalyzeSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_autoanalyze_since_time",
+ Title: "Table time since last auto ANALYZE",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_autoanalyze_since_time",
+ Priority: prioTableAutoAnalyzeSinceTime,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_autoanalyze_ago", Name: "time"},
+ },
+ }
+ tableAnalyzeSinceTimeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_analyze_since_time",
+ Title: "Table time since last manual ANALYZE",
+ Units: "seconds",
+ Fam: "vacuum and analyze",
+ Ctx: "postgres.table_analyze_since_time",
+ Priority: prioTableLastAnalyzeAgo,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_last_analyze_ago", Name: "time"},
+ },
+ }
+ tableNullColumnsCountChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_null_columns_count",
+ Title: "Table null columns",
+ Units: "columns",
+ Fam: "maintenance",
+ Ctx: "postgres.table_null_columns_count",
+ Priority: prioTableNullColumns,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_null_columns", Name: "null"},
+ },
+ }
+ tableTotalSizeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_total_size",
+ Title: "Table total size",
+ Units: "B",
+ Fam: "size",
+ Ctx: "postgres.table_total_size",
+ Priority: prioTableTotalSize,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_total_size", Name: "size"},
+ },
+ }
+ tableBloatSizePercChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_bloat_size_perc",
+ Title: "Table bloat size percentage",
+ Units: "percentage",
+ Fam: "bloat",
+ Ctx: "postgres.table_bloat_size_perc",
+ Priority: prioTableBloatSizePerc,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_bloat_size_perc", Name: "bloat"},
+ },
+ Vars: module.Vars{
+ {ID: "table_%s_db_%s_schema_%s_total_size", Name: "table_size"},
+ },
+ }
+ tableBloatSizeChartTmpl = module.Chart{
+ ID: "table_%s_db_%s_schema_%s_bloat_size",
+ Title: "Table bloat size",
+ Units: "B",
+ Fam: "bloat",
+ Ctx: "postgres.table_bloat_size",
+ Priority: prioTableBloatSize,
+ Dims: module.Dims{
+ {ID: "table_%s_db_%s_schema_%s_bloat_size", Name: "bloat"},
+ },
+ }
+)
+
+func newTableCharts(tbl *tableMetrics) *module.Charts {
+ charts := tableChartsTmpl.Copy()
+
+ if tbl.bloatSize == nil {
+ _ = charts.Remove(tableBloatSizeChartTmpl.ID)
+ _ = charts.Remove(tableBloatSizePercChartTmpl.ID)
+ }
+
+ for i, chart := range *charts {
+ (*charts)[i] = newTableChart(chart, tbl)
+ }
+
+ return charts
+}
+
+func newTableChart(chart *module.Chart, tbl *tableMetrics) *module.Chart {
+ chart = chart.Copy()
+ chart.ID = fmt.Sprintf(chart.ID, tbl.name, tbl.db, tbl.schema)
+ chart.Labels = []module.Label{
+ {Key: "database", Value: tbl.db},
+ {Key: "schema", Value: tbl.schema},
+ {Key: "table", Value: tbl.name},
+ {Key: "parent_table", Value: tbl.parentName},
+ }
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, tbl.name, tbl.db, tbl.schema)
+ }
+ for _, v := range chart.Vars {
+ v.ID = fmt.Sprintf(v.ID, tbl.name, tbl.db, tbl.schema)
+ }
+ return chart
+}
+
+func (p *Postgres) addNewTableCharts(tbl *tableMetrics) {
+ charts := newTableCharts(tbl)
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastAutoVacuumAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableAutoVacuumSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastVacuumAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableVacuumSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastAutoAnalyzeAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableAutoAnalyzeSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableLastAnalyzeAgoChart(tbl *tableMetrics) {
+ chart := newTableChart(tableAnalyzeSinceTimeChartTmpl.Copy(), tbl)
+
+ if err := p.Charts().Add(chart); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableIOChartsCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableIndexIOCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableIndexCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableIndexIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableTOASTIOCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableTOASTCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableTOASTIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) addTableTOASTIndexIOCharts(tbl *tableMetrics) {
+ charts := module.Charts{
+ newTableChart(tableTOASTIndexCacheIORatioChartTmpl.Copy(), tbl),
+ newTableChart(tableTOASTIndexIORateChartTmpl.Copy(), tbl),
+ }
+
+ if err := p.Charts().Add(charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeTableCharts(tbl *tableMetrics) {
+ prefix := fmt.Sprintf("table_%s_db_%s_schema_%s", tbl.name, tbl.db, tbl.schema)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
+
+var (
+ indexChartsTmpl = module.Charts{
+ indexSizeChartTmpl.Copy(),
+ indexBloatSizePercChartTmpl.Copy(),
+ indexBloatSizeChartTmpl.Copy(),
+ indexUsageStatusChartTmpl.Copy(),
+ }
+ indexSizeChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_size",
+ Title: "Index size",
+ Units: "B",
+ Fam: "size",
+ Ctx: "postgres.index_size",
+ Priority: prioIndexSize,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_size", Name: "size"},
+ },
+ }
+ indexBloatSizePercChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size_perc",
+ Title: "Index bloat size percentage",
+ Units: "percentage",
+ Fam: "bloat",
+ Ctx: "postgres.index_bloat_size_perc",
+ Priority: prioIndexBloatSizePerc,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size_perc", Name: "bloat"},
+ },
+ Vars: module.Vars{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_size", Name: "index_size"},
+ },
+ }
+ indexBloatSizeChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size",
+ Title: "Index bloat size",
+ Units: "B",
+ Fam: "bloat",
+ Ctx: "postgres.index_bloat_size",
+ Priority: prioIndexBloatSize,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_bloat_size", Name: "bloat"},
+ },
+ }
+ indexUsageStatusChartTmpl = module.Chart{
+ ID: "index_%s_table_%s_db_%s_schema_%s_usage_status",
+ Title: "Index usage status",
+ Units: "status",
+ Fam: "maintenance",
+ Ctx: "postgres.index_usage_status",
+ Priority: prioIndexUsageStatus,
+ Dims: module.Dims{
+ {ID: "index_%s_table_%s_db_%s_schema_%s_usage_status_used", Name: "used"},
+ {ID: "index_%s_table_%s_db_%s_schema_%s_usage_status_unused", Name: "unused"},
+ },
+ }
+)
+
+func (p *Postgres) addNewIndexCharts(idx *indexMetrics) {
+ charts := indexChartsTmpl.Copy()
+
+ if idx.bloatSize == nil {
+ _ = charts.Remove(indexBloatSizeChartTmpl.ID)
+ _ = charts.Remove(indexBloatSizePercChartTmpl.ID)
+ }
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, idx.name, idx.table, idx.db, idx.schema)
+ chart.Labels = []module.Label{
+ {Key: "database", Value: idx.db},
+ {Key: "schema", Value: idx.schema},
+ {Key: "table", Value: idx.table},
+ {Key: "parent_table", Value: idx.parentTable},
+ {Key: "index", Value: idx.name},
+ }
+ for _, d := range chart.Dims {
+ d.ID = fmt.Sprintf(d.ID, idx.name, idx.table, idx.db, idx.schema)
+ }
+ for _, v := range chart.Vars {
+ v.ID = fmt.Sprintf(v.ID, idx.name, idx.table, idx.db, idx.schema)
+ }
+ }
+
+ if err := p.Charts().Add(*charts...); err != nil {
+ p.Warning(err)
+ }
+}
+
+func (p *Postgres) removeIndexCharts(idx *indexMetrics) {
+ prefix := fmt.Sprintf("index_%s_table_%s_db_%s_schema_%s", idx.name, idx.table, idx.db, idx.schema)
+ for _, c := range *p.Charts() {
+ if strings.HasPrefix(c.ID, prefix) {
+ c.MarkRemove()
+ c.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/collect.go b/src/go/collectors/go.d.plugin/modules/postgres/collect.go
new file mode 100644
index 000000000..b43e2806e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/collect.go
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "strconv"
+ "time"
+
+ "github.com/jackc/pgx/v4"
+ "github.com/jackc/pgx/v4/stdlib"
+)
+
+const (
+ pgVersion94 = 9_04_00
+ pgVersion10 = 10_00_00
+ pgVersion11 = 11_00_00
+)
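
These literals follow PostgreSQL's server_version_num encoding: from v10 on it is major*10000 + minor (14.4 reports 140004); pre-10 releases used major1*10000 + major2*100 + patch (9.4.0 reports 90400). The underscore grouping makes the constants read as version triples. A quick check:

package main

import "fmt"

func main() {
    const pgVersion10 = 10_00_00  // same literal style as the constants above
    v := 140004                   // SHOW server_version_num on PostgreSQL 14.4
    fmt.Println(v >= pgVersion10) // true
    fmt.Println(v/10000, v%10000) // 14 4
}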
+
+func (p *Postgres) collect() (map[string]int64, error) {
+ if p.db == nil {
+ db, err := p.openPrimaryConnection()
+ if err != nil {
+ return nil, err
+ }
+ p.db = db
+ }
+
+ if p.pgVersion == 0 {
+ ver, err := p.doQueryServerVersion()
+ if err != nil {
+ return nil, fmt.Errorf("querying server version error: %v", err)
+ }
+ p.pgVersion = ver
+ p.Debugf("connected to PostgreSQL v%d", p.pgVersion)
+ }
+
+ if p.superUser == nil {
+ v, err := p.doQueryIsSuperUser()
+ if err != nil {
+ return nil, fmt.Errorf("querying is super user error: %v", err)
+ }
+ p.superUser = &v
+ p.Debugf("connected as super user: %v", *p.superUser)
+ }
+
+ if p.pgIsInRecovery == nil {
+ v, err := p.doQueryPGIsInRecovery()
+ if err != nil {
+ return nil, fmt.Errorf("querying recovery status error: %v", err)
+ }
+ p.pgIsInRecovery = &v
+ p.Debugf("the instance is in recovery mode: %v", *p.pgIsInRecovery)
+ }
+
+ now := time.Now()
+
+ if now.Sub(p.recheckSettingsTime) > p.recheckSettingsEvery {
+ p.recheckSettingsTime = now
+ maxConn, err := p.doQuerySettingsMaxConnections()
+ if err != nil {
+ return nil, fmt.Errorf("querying settings max connections error: %v", err)
+ }
+ p.mx.maxConnections = maxConn
+
+ maxLocks, err := p.doQuerySettingsMaxLocksHeld()
+ if err != nil {
+ return nil, fmt.Errorf("querying settings max locks held error: %v", err)
+ }
+ p.mx.maxLocksHeld = maxLocks
+ }
+
+ p.resetMetrics()
+
+ if p.pgVersion >= pgVersion10 {
+ // need 'backend_type' in pg_stat_activity
+ p.addXactQueryRunningTimeChartsOnce.Do(func() {
+ p.addTransactionsRunTimeHistogramChart()
+ p.addQueriesRunTimeHistogramChart()
+ })
+ }
+ if p.isSuperUser() {
+ p.addWALFilesChartsOnce.Do(p.addWALFilesCharts)
+ }
+
+ if err := p.doQueryGlobalMetrics(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryReplicationMetrics(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryDatabasesMetrics(); err != nil {
+ return nil, err
+ }
+ if p.dbSr != nil {
+ if err := p.doQueryQueryableDatabases(); err != nil {
+ return nil, err
+ }
+ }
+ if err := p.doQueryTablesMetrics(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryIndexesMetrics(); err != nil {
+ return nil, err
+ }
+
+ if now.Sub(p.doSlowTime) > p.doSlowEvery {
+ p.doSlowTime = now
+ if err := p.doQueryBloat(); err != nil {
+ return nil, err
+ }
+ if err := p.doQueryColumns(); err != nil {
+ return nil, err
+ }
+ }
+
+ mx := make(map[string]int64)
+ p.collectMetrics(mx)
+
+ return mx, nil
+}
+
+func (p *Postgres) openPrimaryConnection() (*sql.DB, error) {
+ db, err := sql.Open("pgx", p.DSN)
+ if err != nil {
+ return nil, fmt.Errorf("error on opening a connection with the Postgres database [%s]: %v", p.DSN, err)
+ }
+
+ db.SetMaxOpenConns(1)
+ db.SetMaxIdleConns(1)
+ db.SetConnMaxLifetime(10 * time.Minute)
+
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ if err := db.PingContext(ctx); err != nil {
+ _ = db.Close()
+ return nil, fmt.Errorf("error on pinging the Postgres database [%s]: %v", p.DSN, err)
+ }
+
+ return db, nil
+}
+
+func (p *Postgres) openSecondaryConnection(dbname string) (*sql.DB, string, error) {
+ cfg, err := pgx.ParseConfig(p.DSN)
+ if err != nil {
+ return nil, "", fmt.Errorf("error on parsing DSN [%s]: %v", p.DSN, err)
+ }
+
+ cfg.Database = dbname
+ connStr := stdlib.RegisterConnConfig(cfg)
+
+ db, err := sql.Open("pgx", connStr)
+ if err != nil {
+ stdlib.UnregisterConnConfig(connStr)
+ return nil, "", fmt.Errorf("error on opening a secondary connection with the Postgres database [%s]: %v", dbname, err)
+ }
+
+ db.SetMaxOpenConns(1)
+ db.SetMaxIdleConns(1)
+ db.SetConnMaxLifetime(10 * time.Minute)
+
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ if err := db.PingContext(ctx); err != nil {
+ stdlib.UnregisterConnConfig(connStr)
+ _ = db.Close()
+ return nil, "", fmt.Errorf("error on pinging the secondary Postgres database [%s]: %v", dbname, err)
+ }
+
+ return db, connStr, nil
+}
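
openSecondaryConnection reuses the primary DSN but swaps the target database via pgx's stdlib driver registry. A minimal sketch of that pattern in isolation; the DSN and database name are illustrative:

package main

import (
    "database/sql"
    "fmt"

    "github.com/jackc/pgx/v4"
    "github.com/jackc/pgx/v4/stdlib"
)

func main() {
    // Illustrative DSN; any URL or key/value form accepted by pgx works.
    cfg, err := pgx.ParseConfig("postgres://netdata@127.0.0.1:5432/postgres")
    if err != nil {
        panic(err)
    }
    cfg.Database = "mydb" // keep host/credentials, point at another database

    connStr := stdlib.RegisterConnConfig(cfg)
    defer stdlib.UnregisterConnConfig(connStr) // drop the registry entry when done

    db, err := sql.Open("pgx", connStr) // lazy: no network I/O until first use
    if err != nil {
        panic(err)
    }
    defer func() { _ = db.Close() }()
    fmt.Println("opened handle for database:", cfg.Database)
}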
+
+func (p *Postgres) isSuperUser() bool { return p.superUser != nil && *p.superUser }
+
+func (p *Postgres) isPGInRecovery() bool { return p.pgIsInRecovery != nil && *p.pgIsInRecovery }
+
+func (p *Postgres) getDBMetrics(name string) *dbMetrics {
+ db, ok := p.mx.dbs[name]
+ if !ok {
+ db = &dbMetrics{name: name}
+ p.mx.dbs[name] = db
+ }
+ return db
+}
+
+func (p *Postgres) getTableMetrics(name, db, schema string) *tableMetrics {
+ key := name + "_" + db + "_" + schema
+ m, ok := p.mx.tables[key]
+ if !ok {
+ m = &tableMetrics{db: db, schema: schema, name: name}
+ p.mx.tables[key] = m
+ }
+ return m
+}
+
+func (p *Postgres) hasTableMetrics(name, db, schema string) bool {
+ key := name + "_" + db + "_" + schema
+ _, ok := p.mx.tables[key]
+ return ok
+}
+
+func (p *Postgres) getIndexMetrics(name, table, db, schema string) *indexMetrics {
+ key := name + "_" + table + "_" + db + "_" + schema
+ m, ok := p.mx.indexes[key]
+ if !ok {
+ m = &indexMetrics{name: name, db: db, schema: schema, table: table}
+ p.mx.indexes[key] = m
+ }
+ return m
+}
+
+func (p *Postgres) hasIndexMetrics(name, table, db, schema string) bool {
+ key := name + "_" + table + "_" + db + "_" + schema
+ _, ok := p.mx.indexes[key]
+ return ok
+}
+
+func (p *Postgres) getReplAppMetrics(name string) *replStandbyAppMetrics {
+ app, ok := p.mx.replApps[name]
+ if !ok {
+ app = &replStandbyAppMetrics{name: name}
+ p.mx.replApps[name] = app
+ }
+ return app
+}
+
+func (p *Postgres) getReplSlotMetrics(name string) *replSlotMetrics {
+ slot, ok := p.mx.replSlots[name]
+ if !ok {
+ slot = &replSlotMetrics{name: name}
+ p.mx.replSlots[name] = slot
+ }
+ return slot
+}
+
+func parseInt(s string) int64 {
+ v, _ := strconv.ParseInt(s, 10, 64)
+ return v
+}
+
+func parseFloat(s string) int64 {
+ v, _ := strconv.ParseFloat(s, 64)
+ return int64(v)
+}
+
+func newInt(v int64) *int64 {
+ return &v
+}
+
+func calcPercentage(value, total int64) (v int64) {
+ if total == 0 {
+ return 0
+ }
+ if v = value * 100 / total; v < 0 {
+ v = -v
+ }
+ return v
+}
+
+func calcDeltaPercentage(a, b incDelta) int64 {
+ return calcPercentage(a.delta(), a.delta()+b.delta())
+}
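
A worked example of how these helpers combine into the cache miss ratios used by the charts. incDelta is defined in metrics.go (outside this file), so a stand-in with the assumed last-minus-prev semantics is used here, matching how blksRead/blksHit are consumed in collect_metrics.go:

package main

import "fmt"

// Stand-in for the incDelta type from metrics.go; delta() is assumed
// to be last minus prev, per its usage in collect_metrics.go.
type incDelta struct{ prev, last int64 }

func (d incDelta) delta() int64 { return d.last - d.prev }

func calcPercentage(value, total int64) (v int64) {
    if total == 0 {
        return 0
    }
    if v = value * 100 / total; v < 0 {
        v = -v
    }
    return v
}

func calcDeltaPercentage(a, b incDelta) int64 {
    return calcPercentage(a.delta(), a.delta()+b.delta())
}

func main() {
    // Over one collection interval: 25 blocks read from disk, 75 from cache.
    read := incDelta{prev: 100, last: 125}
    hit := incDelta{prev: 900, last: 975}
    fmt.Println(calcDeltaPercentage(read, hit)) // 25 -> a 25% buffer cache miss ratio
    // Integer division truncates: 1 disk read vs 999 hits yields 0, not 0.1.
}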
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/collect_metrics.go b/src/go/collectors/go.d.plugin/modules/postgres/collect_metrics.go
new file mode 100644
index 000000000..84f9abbc7
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/collect_metrics.go
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "fmt"
+
+func (p *Postgres) collectMetrics(mx map[string]int64) {
+ mx["server_connections_used"] = p.mx.connUsed
+ if p.mx.maxConnections > 0 {
+ mx["server_connections_available"] = p.mx.maxConnections - p.mx.connUsed
+ mx["server_connections_utilization"] = calcPercentage(p.mx.connUsed, p.mx.maxConnections)
+ }
+ p.mx.xactTimeHist.WriteTo(mx, "transaction_running_time_hist", 1, 1)
+ p.mx.queryTimeHist.WriteTo(mx, "query_running_time_hist", 1, 1)
+ mx["server_uptime"] = p.mx.uptime
+ mx["server_connections_state_active"] = p.mx.connStateActive
+ mx["server_connections_state_idle"] = p.mx.connStateIdle
+ mx["server_connections_state_idle_in_transaction"] = p.mx.connStateIdleInTrans
+ mx["server_connections_state_idle_in_transaction_aborted"] = p.mx.connStateIdleInTransAborted
+ mx["server_connections_state_fastpath_function_call"] = p.mx.connStateFastpathFunctionCall
+ mx["server_connections_state_disabled"] = p.mx.connStateDisabled
+ mx["checkpoints_timed"] = p.mx.checkpointsTimed
+ mx["checkpoints_req"] = p.mx.checkpointsReq
+ mx["checkpoint_write_time"] = p.mx.checkpointWriteTime
+ mx["checkpoint_sync_time"] = p.mx.checkpointSyncTime
+ mx["buffers_checkpoint"] = p.mx.buffersCheckpoint
+ mx["buffers_clean"] = p.mx.buffersClean
+ mx["maxwritten_clean"] = p.mx.maxwrittenClean
+ mx["buffers_backend"] = p.mx.buffersBackend
+ mx["buffers_backend_fsync"] = p.mx.buffersBackendFsync
+ mx["buffers_alloc"] = p.mx.buffersAlloc
+ mx["oldest_current_xid"] = p.mx.oldestXID
+ mx["percent_towards_wraparound"] = p.mx.percentTowardsWraparound
+ mx["percent_towards_emergency_autovacuum"] = p.mx.percentTowardsEmergencyAutovacuum
+ mx["wal_writes"] = p.mx.walWrites
+ mx["wal_recycled_files"] = p.mx.walRecycledFiles
+ mx["wal_written_files"] = p.mx.walWrittenFiles
+ mx["wal_archive_files_ready_count"] = p.mx.walArchiveFilesReady
+ mx["wal_archive_files_done_count"] = p.mx.walArchiveFilesDone
+ mx["catalog_relkind_r_count"] = p.mx.relkindOrdinaryTable
+ mx["catalog_relkind_i_count"] = p.mx.relkindIndex
+ mx["catalog_relkind_S_count"] = p.mx.relkindSequence
+ mx["catalog_relkind_t_count"] = p.mx.relkindTOASTTable
+ mx["catalog_relkind_v_count"] = p.mx.relkindView
+ mx["catalog_relkind_m_count"] = p.mx.relkindMatView
+ mx["catalog_relkind_c_count"] = p.mx.relkindCompositeType
+ mx["catalog_relkind_f_count"] = p.mx.relkindForeignTable
+ mx["catalog_relkind_p_count"] = p.mx.relkindPartitionedTable
+ mx["catalog_relkind_I_count"] = p.mx.relkindPartitionedIndex
+ mx["catalog_relkind_r_size"] = p.mx.relkindOrdinaryTableSize
+ mx["catalog_relkind_i_size"] = p.mx.relkindIndexSize
+ mx["catalog_relkind_S_size"] = p.mx.relkindSequenceSize
+ mx["catalog_relkind_t_size"] = p.mx.relkindTOASTTableSize
+ mx["catalog_relkind_v_size"] = p.mx.relkindViewSize
+ mx["catalog_relkind_m_size"] = p.mx.relkindMatViewSize
+ mx["catalog_relkind_c_size"] = p.mx.relkindCompositeTypeSize
+ mx["catalog_relkind_f_size"] = p.mx.relkindForeignTableSize
+ mx["catalog_relkind_p_size"] = p.mx.relkindPartitionedTableSize
+ mx["catalog_relkind_I_size"] = p.mx.relkindPartitionedIndexSize
+ mx["autovacuum_analyze"] = p.mx.autovacuumWorkersAnalyze
+ mx["autovacuum_vacuum_analyze"] = p.mx.autovacuumWorkersVacuumAnalyze
+ mx["autovacuum_vacuum"] = p.mx.autovacuumWorkersVacuum
+ mx["autovacuum_vacuum_freeze"] = p.mx.autovacuumWorkersVacuumFreeze
+ mx["autovacuum_brin_summarize"] = p.mx.autovacuumWorkersBrinSummarize
+
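+	// Databases use mark-and-sweep: entries not updated this cycle are removed together with
+	// their charts, while newly seen ones get charts on first collection.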
+ var locksHeld int64
+ for name, m := range p.mx.dbs {
+ if !m.updated {
+ delete(p.mx.dbs, name)
+ p.removeDatabaseCharts(m)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewDatabaseCharts(m)
+ if p.isPGInRecovery() {
+ p.addDBConflictsCharts(m)
+ }
+ }
+ px := "db_" + m.name + "_"
+ mx[px+"numbackends"] = m.numBackends
+ if m.datConnLimit <= 0 {
+ mx[px+"numbackends_utilization"] = calcPercentage(m.numBackends, p.mx.maxConnections)
+ } else {
+ mx[px+"numbackends_utilization"] = calcPercentage(m.numBackends, m.datConnLimit)
+ }
+ mx[px+"xact_commit"] = m.xactCommit
+ mx[px+"xact_rollback"] = m.xactRollback
+ mx[px+"blks_read"] = m.blksRead.last
+ mx[px+"blks_hit"] = m.blksHit.last
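+		// The disk-read ratio is computed over the deltas since the previous collection; the baselines advance right after.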
+ mx[px+"blks_read_perc"] = calcDeltaPercentage(m.blksRead, m.blksHit)
+ m.blksRead.prev, m.blksHit.prev = m.blksRead.last, m.blksHit.last
+ mx[px+"tup_returned"] = m.tupReturned.last
+ mx[px+"tup_fetched"] = m.tupFetched.last
+ mx[px+"tup_fetched_perc"] = calcPercentage(m.tupFetched.delta(), m.tupReturned.delta())
+ m.tupReturned.prev, m.tupFetched.prev = m.tupReturned.last, m.tupFetched.last
+ mx[px+"tup_inserted"] = m.tupInserted
+ mx[px+"tup_updated"] = m.tupUpdated
+ mx[px+"tup_deleted"] = m.tupDeleted
+ mx[px+"conflicts"] = m.conflicts
+ if m.size != nil {
+ mx[px+"size"] = *m.size
+ }
+ mx[px+"temp_files"] = m.tempFiles
+ mx[px+"temp_bytes"] = m.tempBytes
+ mx[px+"deadlocks"] = m.deadlocks
+ mx[px+"confl_tablespace"] = m.conflTablespace
+ mx[px+"confl_lock"] = m.conflLock
+ mx[px+"confl_snapshot"] = m.conflSnapshot
+ mx[px+"confl_bufferpin"] = m.conflBufferpin
+ mx[px+"confl_deadlock"] = m.conflDeadlock
+ mx[px+"lock_mode_AccessShareLock_held"] = m.accessShareLockHeld
+ mx[px+"lock_mode_RowShareLock_held"] = m.rowShareLockHeld
+ mx[px+"lock_mode_RowExclusiveLock_held"] = m.rowExclusiveLockHeld
+ mx[px+"lock_mode_ShareUpdateExclusiveLock_held"] = m.shareUpdateExclusiveLockHeld
+ mx[px+"lock_mode_ShareLock_held"] = m.shareLockHeld
+ mx[px+"lock_mode_ShareRowExclusiveLock_held"] = m.shareRowExclusiveLockHeld
+ mx[px+"lock_mode_ExclusiveLock_held"] = m.exclusiveLockHeld
+ mx[px+"lock_mode_AccessExclusiveLock_held"] = m.accessExclusiveLockHeld
+ mx[px+"lock_mode_AccessShareLock_awaited"] = m.accessShareLockAwaited
+ mx[px+"lock_mode_RowShareLock_awaited"] = m.rowShareLockAwaited
+ mx[px+"lock_mode_RowExclusiveLock_awaited"] = m.rowExclusiveLockAwaited
+ mx[px+"lock_mode_ShareUpdateExclusiveLock_awaited"] = m.shareUpdateExclusiveLockAwaited
+ mx[px+"lock_mode_ShareLock_awaited"] = m.shareLockAwaited
+ mx[px+"lock_mode_ShareRowExclusiveLock_awaited"] = m.shareRowExclusiveLockAwaited
+ mx[px+"lock_mode_ExclusiveLock_awaited"] = m.exclusiveLockAwaited
+ mx[px+"lock_mode_AccessExclusiveLock_awaited"] = m.accessExclusiveLockAwaited
+ locksHeld += m.accessShareLockHeld + m.rowShareLockHeld +
+ m.rowExclusiveLockHeld + m.shareUpdateExclusiveLockHeld +
+ m.shareLockHeld + m.shareRowExclusiveLockHeld +
+ m.exclusiveLockHeld + m.accessExclusiveLockHeld
+ }
+ mx["databases_count"] = int64(len(p.mx.dbs))
+ mx["locks_utilization"] = calcPercentage(locksHeld, p.mx.maxLocksHeld)
+
+ for name, m := range p.mx.tables {
+ if !m.updated {
+ delete(p.mx.tables, name)
+ p.removeTableCharts(m)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewTableCharts(m)
+ }
+ if !m.hasLastAutoVacuumChart && m.lastAutoVacuumAgo > 0 {
+ m.hasLastAutoVacuumChart = true
+ p.addTableLastAutoVacuumAgoChart(m)
+ }
+ if !m.hasLastVacuumChart && m.lastVacuumAgo > 0 {
+ m.hasLastVacuumChart = true
+ p.addTableLastVacuumAgoChart(m)
+ }
+ if !m.hasLastAutoAnalyzeChart && m.lastAutoAnalyzeAgo > 0 {
+ m.hasLastAutoAnalyzeChart = true
+ p.addTableLastAutoAnalyzeAgoChart(m)
+ }
+ if !m.hasLastAnalyzeChart && m.lastAnalyzeAgo > 0 {
+ m.hasLastAnalyzeChart = true
+ p.addTableLastAnalyzeAgoChart(m)
+ }
+ if !m.hasTableIOCharts && m.heapBlksRead.last != -1 {
+ m.hasTableIOCharts = true
+ p.addTableIOChartsCharts(m)
+ }
+ if !m.hasTableIdxIOCharts && m.idxBlksRead.last != -1 {
+ m.hasTableIdxIOCharts = true
+ p.addTableIndexIOCharts(m)
+ }
+ if !m.hasTableTOASTIOCharts && m.toastBlksRead.last != -1 {
+ m.hasTableTOASTIOCharts = true
+ p.addTableTOASTIOCharts(m)
+ }
+ if !m.hasTableTOASTIdxIOCharts && m.tidxBlksRead.last != -1 {
+ m.hasTableTOASTIdxIOCharts = true
+ p.addTableTOASTIndexIOCharts(m)
+ }
+
+ px := fmt.Sprintf("table_%s_db_%s_schema_%s_", m.name, m.db, m.schema)
+
+ mx[px+"seq_scan"] = m.seqScan
+ mx[px+"seq_tup_read"] = m.seqTupRead
+ mx[px+"idx_scan"] = m.idxScan
+ mx[px+"idx_tup_fetch"] = m.idxTupFetch
+ mx[px+"n_live_tup"] = m.nLiveTup
+ mx[px+"n_dead_tup"] = m.nDeadTup
+ mx[px+"n_dead_tup_perc"] = calcPercentage(m.nDeadTup, m.nDeadTup+m.nLiveTup)
+ mx[px+"n_tup_ins"] = m.nTupIns
+ mx[px+"n_tup_upd"] = m.nTupUpd.last
+ mx[px+"n_tup_del"] = m.nTupDel
+ mx[px+"n_tup_hot_upd"] = m.nTupHotUpd.last
+ if m.lastAutoVacuumAgo != -1 {
+ mx[px+"last_autovacuum_ago"] = m.lastAutoVacuumAgo
+ }
+ if m.lastVacuumAgo != -1 {
+ mx[px+"last_vacuum_ago"] = m.lastVacuumAgo
+ }
+ if m.lastAutoAnalyzeAgo != -1 {
+ mx[px+"last_autoanalyze_ago"] = m.lastAutoAnalyzeAgo
+ }
+ if m.lastAnalyzeAgo != -1 {
+ mx[px+"last_analyze_ago"] = m.lastAnalyzeAgo
+ }
+ mx[px+"total_size"] = m.totalSize
+ if m.bloatSize != nil && m.bloatSizePerc != nil {
+ mx[px+"bloat_size"] = *m.bloatSize
+ mx[px+"bloat_size_perc"] = *m.bloatSizePerc
+ }
+ if m.nullColumns != nil {
+ mx[px+"null_columns"] = *m.nullColumns
+ }
+
+ mx[px+"n_tup_hot_upd_perc"] = calcPercentage(m.nTupHotUpd.delta(), m.nTupUpd.delta())
+ m.nTupHotUpd.prev, m.nTupUpd.prev = m.nTupHotUpd.last, m.nTupUpd.last
+
+ mx[px+"heap_blks_read"] = m.heapBlksRead.last
+ mx[px+"heap_blks_hit"] = m.heapBlksHit.last
+ mx[px+"heap_blks_read_perc"] = calcDeltaPercentage(m.heapBlksRead, m.heapBlksHit)
+ m.heapBlksHit.prev, m.heapBlksRead.prev = m.heapBlksHit.last, m.heapBlksRead.last
+
+ mx[px+"idx_blks_read"] = m.idxBlksRead.last
+ mx[px+"idx_blks_hit"] = m.idxBlksHit.last
+ mx[px+"idx_blks_read_perc"] = calcDeltaPercentage(m.idxBlksRead, m.idxBlksHit)
+ m.idxBlksHit.prev, m.idxBlksRead.prev = m.idxBlksHit.last, m.idxBlksRead.last
+
+ mx[px+"toast_blks_read"] = m.toastBlksRead.last
+ mx[px+"toast_blks_hit"] = m.toastBlksHit.last
+ mx[px+"toast_blks_read_perc"] = calcDeltaPercentage(m.toastBlksRead, m.toastBlksHit)
+ m.toastBlksHit.prev, m.toastBlksRead.prev = m.toastBlksHit.last, m.toastBlksRead.last
+
+ mx[px+"tidx_blks_read"] = m.tidxBlksRead.last
+ mx[px+"tidx_blks_hit"] = m.tidxBlksHit.last
+ mx[px+"tidx_blks_read_perc"] = calcDeltaPercentage(m.tidxBlksRead, m.tidxBlksHit)
+ m.tidxBlksHit.prev, m.tidxBlksRead.prev = m.tidxBlksHit.last, m.tidxBlksRead.last
+ }
+
+ for name, m := range p.mx.indexes {
+ if !m.updated {
+ delete(p.mx.indexes, name)
+ p.removeIndexCharts(m)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewIndexCharts(m)
+ }
+
+ px := fmt.Sprintf("index_%s_table_%s_db_%s_schema_%s_", m.name, m.table, m.db, m.schema)
+ mx[px+"size"] = m.size
+ if m.bloatSize != nil && m.bloatSizePerc != nil {
+ mx[px+"bloat_size"] = *m.bloatSize
+ mx[px+"bloat_size_perc"] = *m.bloatSizePerc
+ }
+ if m.idxScan+m.idxTupRead+m.idxTupFetch > 0 {
+ mx[px+"usage_status_used"], mx[px+"usage_status_unused"] = 1, 0
+ } else {
+ mx[px+"usage_status_used"], mx[px+"usage_status_unused"] = 0, 1
+ }
+ }
+
+ for name, m := range p.mx.replApps {
+ if !m.updated {
+ delete(p.mx.replApps, name)
+ p.removeReplicationStandbyAppCharts(name)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewReplicationStandbyAppCharts(name)
+ }
+ px := "repl_standby_app_" + m.name + "_wal_"
+ mx[px+"sent_lag_size"] = m.walSentDelta
+ mx[px+"write_lag_size"] = m.walWriteDelta
+ mx[px+"flush_lag_size"] = m.walFlushDelta
+ mx[px+"replay_lag_size"] = m.walReplayDelta
+		mx[px+"write_lag_time"] = m.walWriteLag
+ mx[px+"flush_lag_time"] = m.walFlushLag
+ mx[px+"replay_lag_time"] = m.walReplayLag
+ }
+
+ for name, m := range p.mx.replSlots {
+ if !m.updated {
+ delete(p.mx.replSlots, name)
+ p.removeReplicationSlotCharts(name)
+ continue
+ }
+ if !m.hasCharts {
+ m.hasCharts = true
+ p.addNewReplicationSlotCharts(name)
+ }
+ px := "repl_slot_" + m.name + "_"
+ mx[px+"replslot_wal_keep"] = m.walKeep
+ mx[px+"replslot_files"] = m.files
+ }
+}
+
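+// resetMetrics zeroes per-cycle values while preserving chart flags and incDelta baselines (prev),
+// so rate calculations survive the reset.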
+func (p *Postgres) resetMetrics() {
+ p.mx.srvMetrics = srvMetrics{
+ xactTimeHist: p.mx.xactTimeHist,
+ queryTimeHist: p.mx.queryTimeHist,
+ maxConnections: p.mx.maxConnections,
+ maxLocksHeld: p.mx.maxLocksHeld,
+ }
+ for name, m := range p.mx.dbs {
+ p.mx.dbs[name] = &dbMetrics{
+ name: m.name,
+ hasCharts: m.hasCharts,
+ blksRead: incDelta{prev: m.blksRead.prev},
+ blksHit: incDelta{prev: m.blksHit.prev},
+ tupReturned: incDelta{prev: m.tupReturned.prev},
+ tupFetched: incDelta{prev: m.tupFetched.prev},
+ }
+ }
+ for name, m := range p.mx.tables {
+ p.mx.tables[name] = &tableMetrics{
+ db: m.db,
+ schema: m.schema,
+ name: m.name,
+ hasCharts: m.hasCharts,
+ hasLastAutoVacuumChart: m.hasLastAutoVacuumChart,
+ hasLastVacuumChart: m.hasLastVacuumChart,
+ hasLastAutoAnalyzeChart: m.hasLastAutoAnalyzeChart,
+ hasLastAnalyzeChart: m.hasLastAnalyzeChart,
+ hasTableIOCharts: m.hasTableIOCharts,
+ hasTableIdxIOCharts: m.hasTableIdxIOCharts,
+ hasTableTOASTIOCharts: m.hasTableTOASTIOCharts,
+ hasTableTOASTIdxIOCharts: m.hasTableTOASTIdxIOCharts,
+ nTupUpd: incDelta{prev: m.nTupUpd.prev},
+ nTupHotUpd: incDelta{prev: m.nTupHotUpd.prev},
+ heapBlksRead: incDelta{prev: m.heapBlksRead.prev},
+ heapBlksHit: incDelta{prev: m.heapBlksHit.prev},
+ idxBlksRead: incDelta{prev: m.idxBlksRead.prev},
+ idxBlksHit: incDelta{prev: m.idxBlksHit.prev},
+ toastBlksRead: incDelta{prev: m.toastBlksRead.prev},
+ toastBlksHit: incDelta{prev: m.toastBlksHit.prev},
+ tidxBlksRead: incDelta{prev: m.tidxBlksRead.prev},
+ tidxBlksHit: incDelta{prev: m.tidxBlksHit.prev},
+ bloatSize: m.bloatSize,
+ bloatSizePerc: m.bloatSizePerc,
+ nullColumns: m.nullColumns,
+ }
+ }
+ for name, m := range p.mx.indexes {
+ p.mx.indexes[name] = &indexMetrics{
+ name: m.name,
+ db: m.db,
+ schema: m.schema,
+ table: m.table,
+ updated: m.updated,
+ hasCharts: m.hasCharts,
+ bloatSize: m.bloatSize,
+ bloatSizePerc: m.bloatSizePerc,
+ }
+ }
+ for name, m := range p.mx.replApps {
+ p.mx.replApps[name] = &replStandbyAppMetrics{
+ name: m.name,
+ hasCharts: m.hasCharts,
+ }
+ }
+ for name, m := range p.mx.replSlots {
+ p.mx.replSlots[name] = &replSlotMetrics{
+ name: m.name,
+ hasCharts: m.hasCharts,
+ }
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/config_schema.json b/src/go/collectors/go.d.plugin/modules/postgres/config_schema.json
new file mode 100644
index 000000000..42bff329b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/config_schema.json
@@ -0,0 +1,141 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Postgres collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "Postgres server Data Source Name in [key/value string](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-KEYWORD-VALUE) or [URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS) format.",
+ "type": "string",
+ "default": "postgres://netdata:password@127.0.0.1:5432/postgres"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for queries, in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "collect_databases_matching": {
+ "title": "Database selector",
+        "description": "Configuration for monitoring specific databases using [Netdata simple patterns](https://github.com/netdata/netdata/tree/master/src/libnetdata/simple_pattern#readme). If left empty, table and index metrics are not collected for any database.",
+ "type": "string"
+ },
+ "max_db_tables": {
+ "title": "Table limit",
+ "description": "Table metrics will not be collected for databases that have more tables than the limit. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 50
+ },
+ "max_db_indexes": {
+ "title": "Index limit",
+ "description": "Index metrics will not be collected for databases that have more indexes than the limit. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 250
+ },
+ "transaction_time_histogram": {
+ "title": "Transaction time histogram",
+ "description": "Buckets for transaction time histogram in milliseconds.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "uniqueItems": true,
+ "default": [
+ 0.1,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
+ },
+ "query_time_histogram": {
+ "title": "Query time histogram",
+ "description": "Buckets for query time histogram in milliseconds.",
+ "type": [
+ "array",
+ "null"
+ ],
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "uniqueItems": true,
+ "default": [
+ 0.1,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
+ }
+ },
+ "required": [
+ "dsn"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "dsn",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Database stats",
+ "fields": [
+ "max_db_tables",
+ "max_db_indexes",
+ "collect_databases_matching"
+ ]
+ },
+ {
+ "title": "Histograms",
+ "fields": [
+ "transaction_time_histogram",
+ "query_time_histogram"
+ ]
+ }
+ ]
+ },
+ "dsn": {
+ "ui:placeholder": "postgres://username:password@host:port/dbname"
+ },
+ "transaction_time_histogram": {
+ "ui:listFlavour": "list"
+ },
+ "query_time_histogram": {
+ "ui:listFlavour": "list"
+ }
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query.go
new file mode 100644
index 000000000..3b90be0d7
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query.go
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "context"
+ "database/sql"
+)
+
+func (p *Postgres) doQueryRow(query string, v any) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ return p.db.QueryRowContext(ctx, query).Scan(v)
+}
+
+func (p *Postgres) doDBQueryRow(db *sql.DB, query string, v any) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ return db.QueryRowContext(ctx, query).Scan(v)
+}
+
+func (p *Postgres) doQuery(query string, assign func(column, value string, rowEnd bool)) error {
+ return p.doDBQuery(p.db, query, assign)
+}
+
+func (p *Postgres) doDBQuery(db *sql.DB, query string, assign func(column, value string, rowEnd bool)) error {
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
+ defer cancel()
+
+ rows, err := db.QueryContext(ctx, query)
+ if err != nil {
+ return err
+ }
+ defer func() { _ = rows.Close() }()
+
+ return readRows(rows, assign)
+}
+
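+// readRows scans every row and calls assign once per column; rowEnd is true for the last
+// column of a row, letting callers flush per-row state.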
+func readRows(rows *sql.Rows, assign func(column, value string, rowEnd bool)) error {
+ if assign == nil {
+ return nil
+ }
+
+ columns, err := rows.Columns()
+ if err != nil {
+ return err
+ }
+
+ values := makeValues(len(columns))
+
+ for rows.Next() {
+ if err := rows.Scan(values...); err != nil {
+ return err
+ }
+ for i, l := 0, len(values); i < l; i++ {
+ assign(columns[i], valueToString(values[i]), i == l-1)
+ }
+ }
+ return rows.Err()
+}
+
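+// valueToString unwraps the sql.NullString every column is scanned into; SQL NULLs surface as empty strings.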
+func valueToString(value any) string {
+ v, ok := value.(*sql.NullString)
+ if !ok || !v.Valid {
+ return ""
+ }
+ return v.String
+}
+
+func makeValues(size int) []any {
+ vs := make([]any, size)
+ for i := range vs {
+ vs[i] = &sql.NullString{}
+ }
+ return vs
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_bloat.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_bloat.go
new file mode 100644
index 000000000..ae1add4ac
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_bloat.go
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "database/sql"
+
+func (p *Postgres) doQueryBloat() error {
+ if err := p.doDBQueryBloat(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryBloat(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryBloat(db *sql.DB) error {
+ q := queryBloat()
+
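+	// Reset previously reported bloat to zero (only where a value already exists) so stale
+	// numbers don't linger if a relation drops out of the query result.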
+ for _, m := range p.mx.tables {
+ if m.bloatSize != nil {
+ m.bloatSize = newInt(0)
+ }
+ if m.bloatSizePerc != nil {
+ m.bloatSizePerc = newInt(0)
+ }
+ }
+ for _, m := range p.mx.indexes {
+ if m.bloatSize != nil {
+ m.bloatSize = newInt(0)
+ }
+ if m.bloatSizePerc != nil {
+ m.bloatSizePerc = newInt(0)
+ }
+ }
+
+ var dbname, schema, table, iname string
+ var tableWasted, idxWasted int64
+ return p.doDBQuery(db, q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "db":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "tablename":
+ table = value
+ case "wastedbytes":
+ tableWasted = parseFloat(value)
+ case "iname":
+ iname = value
+ case "wastedibytes":
+ idxWasted = parseFloat(value)
+ }
+ if !rowEnd {
+ return
+ }
+ if p.hasTableMetrics(table, dbname, schema) {
+ v := p.getTableMetrics(table, dbname, schema)
+ v.bloatSize = newInt(tableWasted)
+ v.bloatSizePerc = newInt(calcPercentage(tableWasted, v.totalSize))
+ }
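+		// The bloat query reports "?" as the index name when a table has no index.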
+ if iname != "?" && p.hasIndexMetrics(iname, table, dbname, schema) {
+ v := p.getIndexMetrics(iname, table, dbname, schema)
+ v.bloatSize = newInt(idxWasted)
+ v.bloatSizePerc = newInt(calcPercentage(idxWasted, v.size))
+ }
+ })
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_columns.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_columns.go
new file mode 100644
index 000000000..1da655aaf
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_columns.go
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "database/sql"
+
+func (p *Postgres) doQueryColumns() error {
+ if err := p.doDBQueryColumns(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryColumns(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryColumns(db *sql.DB) error {
+ q := queryColumnsStats()
+
+ for _, m := range p.mx.tables {
+ if m.nullColumns != nil {
+ m.nullColumns = newInt(0)
+ }
+ }
+
+ var dbname, schema, table string
+ var nullPerc int64
+ return p.doDBQuery(db, q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ table = value
+ case "null_percent":
+ nullPerc = parseInt(value)
+ }
+ if !rowEnd {
+ return
+ }
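+		// Only columns that are 100% NULL in the sampled rows count toward the table's null_columns metric.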
+ if nullPerc == 100 && p.hasTableMetrics(table, dbname, schema) {
+ v := p.getTableMetrics(table, dbname, schema)
+ if v.nullColumns == nil {
+ v.nullColumns = newInt(0)
+ }
+ *v.nullColumns++
+ }
+ })
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_databases.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_databases.go
new file mode 100644
index 000000000..0cee7a0cd
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_databases.go
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+)
+
+func (p *Postgres) doQueryDatabasesMetrics() error {
+ if err := p.doQueryDatabaseStats(); err != nil {
+ return fmt.Errorf("querying database stats error: %v", err)
+ }
+ if err := p.doQueryDatabaseSize(); err != nil {
+ return fmt.Errorf("querying database size error: %v", err)
+ }
+ if p.isPGInRecovery() {
+ if err := p.doQueryDatabaseConflicts(); err != nil {
+ return fmt.Errorf("querying database conflicts error: %v", err)
+ }
+ }
+ if err := p.doQueryDatabaseLocks(); err != nil {
+ return fmt.Errorf("querying database locks error: %v", err)
+ }
+ return nil
+}
+
+func (p *Postgres) doQueryDatabaseStats() error {
+ q := queryDatabaseStats()
+
+ var db string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "numbackends":
+ p.getDBMetrics(db).numBackends = parseInt(value)
+ case "datconnlimit":
+ p.getDBMetrics(db).datConnLimit = parseInt(value)
+ case "xact_commit":
+ p.getDBMetrics(db).xactCommit = parseInt(value)
+ case "xact_rollback":
+ p.getDBMetrics(db).xactRollback = parseInt(value)
+ case "blks_read_bytes":
+ p.getDBMetrics(db).blksRead.last = parseInt(value)
+ case "blks_hit_bytes":
+ p.getDBMetrics(db).blksHit.last = parseInt(value)
+ case "tup_returned":
+ p.getDBMetrics(db).tupReturned.last = parseInt(value)
+ case "tup_fetched":
+ p.getDBMetrics(db).tupFetched.last = parseInt(value)
+ case "tup_inserted":
+ p.getDBMetrics(db).tupInserted = parseInt(value)
+ case "tup_updated":
+ p.getDBMetrics(db).tupUpdated = parseInt(value)
+ case "tup_deleted":
+ p.getDBMetrics(db).tupDeleted = parseInt(value)
+ case "conflicts":
+ p.getDBMetrics(db).conflicts = parseInt(value)
+ case "temp_files":
+ p.getDBMetrics(db).tempFiles = parseInt(value)
+ case "temp_bytes":
+ p.getDBMetrics(db).tempBytes = parseInt(value)
+ case "deadlocks":
+ p.getDBMetrics(db).deadlocks = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryDatabaseSize() error {
+ q := queryDatabaseSize(p.pgVersion)
+
+ var db string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ case "size":
+ p.getDBMetrics(db).size = newInt(parseInt(value))
+ }
+ })
+}
+
+func (p *Postgres) doQueryDatabaseConflicts() error {
+ q := queryDatabaseConflicts()
+
+ var db string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "confl_tablespace":
+ p.getDBMetrics(db).conflTablespace = parseInt(value)
+ case "confl_lock":
+ p.getDBMetrics(db).conflLock = parseInt(value)
+ case "confl_snapshot":
+ p.getDBMetrics(db).conflSnapshot = parseInt(value)
+ case "confl_bufferpin":
+ p.getDBMetrics(db).conflBufferpin = parseInt(value)
+ case "confl_deadlock":
+ p.getDBMetrics(db).conflDeadlock = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryDatabaseLocks() error {
+ q := queryDatabaseLocks()
+
+ var db, mode string
+ var granted bool
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ db = value
+ p.getDBMetrics(db).updated = true
+ case "mode":
+ mode = value
+ case "granted":
+ granted = value == "true" || value == "t"
+ case "locks_count":
+ // https://github.com/postgres/postgres/blob/7c34555f8c39eeefcc45b3c3f027d7a063d738fc/src/include/storage/lockdefs.h#L36-L45
+			// https://www.postgresql.org/docs/current/explicit-locking.html
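+			// A granted lock counts as held; a not yet granted one as awaited.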
+ switch {
+ case mode == "AccessShareLock" && granted:
+ p.getDBMetrics(db).accessShareLockHeld = parseInt(value)
+ case mode == "AccessShareLock":
+ p.getDBMetrics(db).accessShareLockAwaited = parseInt(value)
+ case mode == "RowShareLock" && granted:
+ p.getDBMetrics(db).rowShareLockHeld = parseInt(value)
+ case mode == "RowShareLock":
+ p.getDBMetrics(db).rowShareLockAwaited = parseInt(value)
+ case mode == "RowExclusiveLock" && granted:
+ p.getDBMetrics(db).rowExclusiveLockHeld = parseInt(value)
+ case mode == "RowExclusiveLock":
+ p.getDBMetrics(db).rowExclusiveLockAwaited = parseInt(value)
+ case mode == "ShareUpdateExclusiveLock" && granted:
+ p.getDBMetrics(db).shareUpdateExclusiveLockHeld = parseInt(value)
+ case mode == "ShareUpdateExclusiveLock":
+ p.getDBMetrics(db).shareUpdateExclusiveLockAwaited = parseInt(value)
+ case mode == "ShareLock" && granted:
+ p.getDBMetrics(db).shareLockHeld = parseInt(value)
+ case mode == "ShareLock":
+ p.getDBMetrics(db).shareLockAwaited = parseInt(value)
+ case mode == "ShareRowExclusiveLock" && granted:
+ p.getDBMetrics(db).shareRowExclusiveLockHeld = parseInt(value)
+ case mode == "ShareRowExclusiveLock":
+ p.getDBMetrics(db).shareRowExclusiveLockAwaited = parseInt(value)
+ case mode == "ExclusiveLock" && granted:
+ p.getDBMetrics(db).exclusiveLockHeld = parseInt(value)
+ case mode == "ExclusiveLock":
+ p.getDBMetrics(db).exclusiveLockAwaited = parseInt(value)
+ case mode == "AccessExclusiveLock" && granted:
+ p.getDBMetrics(db).accessExclusiveLockHeld = parseInt(value)
+ case mode == "AccessExclusiveLock":
+ p.getDBMetrics(db).accessExclusiveLockAwaited = parseInt(value)
+ }
+ }
+ })
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_global.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_global.go
new file mode 100644
index 000000000..c70772a23
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_global.go
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+ "strconv"
+)
+
+func (p *Postgres) doQueryGlobalMetrics() error {
+ if err := p.doQueryConnectionsUsed(); err != nil {
+ return fmt.Errorf("querying server connections used error: %v", err)
+ }
+ if err := p.doQueryConnectionsState(); err != nil {
+ return fmt.Errorf("querying server connections state error: %v", err)
+ }
+ if err := p.doQueryCheckpoints(); err != nil {
+		return fmt.Errorf("querying checkpoints error: %v", err)
+ }
+ if err := p.doQueryUptime(); err != nil {
+ return fmt.Errorf("querying server uptime error: %v", err)
+ }
+ if err := p.doQueryTXIDWraparound(); err != nil {
+ return fmt.Errorf("querying txid wraparound error: %v", err)
+ }
+ if err := p.doQueryWALWrites(); err != nil {
+ return fmt.Errorf("querying wal writes error: %v", err)
+ }
+ if err := p.doQueryCatalogRelations(); err != nil {
+ return fmt.Errorf("querying catalog relations error: %v", err)
+ }
+ if p.pgVersion >= pgVersion94 {
+ if err := p.doQueryAutovacuumWorkers(); err != nil {
+ return fmt.Errorf("querying autovacuum workers error: %v", err)
+ }
+ }
+ if p.pgVersion >= pgVersion10 {
+ if err := p.doQueryXactQueryRunningTime(); err != nil {
+			return fmt.Errorf("querying xact/query running time error: %v", err)
+ }
+ }
+
+ if !p.isSuperUser() {
+ return nil
+ }
+
+ if p.pgVersion >= pgVersion94 {
+ if err := p.doQueryWALFiles(); err != nil {
+ return fmt.Errorf("querying wal files error: %v", err)
+ }
+ }
+ if err := p.doQueryWALArchiveFiles(); err != nil {
+ return fmt.Errorf("querying wal archive files error: %v", err)
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryConnectionsUsed() error {
+ q := queryServerCurrentConnectionsUsed()
+
+ var v string
+ if err := p.doQueryRow(q, &v); err != nil {
+ return err
+ }
+
+ p.mx.connUsed = parseInt(v)
+
+ return nil
+}
+
+func (p *Postgres) doQueryConnectionsState() error {
+ q := queryServerConnectionsState()
+
+ var state string
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "state":
+ state = value
+ case "count":
+ switch state {
+ case "active":
+ p.mx.connStateActive = parseInt(value)
+ case "idle":
+ p.mx.connStateIdle = parseInt(value)
+ case "idle in transaction":
+ p.mx.connStateIdleInTrans = parseInt(value)
+ case "idle in transaction (aborted)":
+ p.mx.connStateIdleInTransAborted = parseInt(value)
+ case "fastpath function call":
+ p.mx.connStateFastpathFunctionCall = parseInt(value)
+ case "disabled":
+ p.mx.connStateDisabled = parseInt(value)
+ }
+ }
+ })
+}
+
+func (p *Postgres) doQueryCheckpoints() error {
+ q := queryCheckpoints()
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "checkpoints_timed":
+ p.mx.checkpointsTimed = parseInt(value)
+ case "checkpoints_req":
+ p.mx.checkpointsReq = parseInt(value)
+ case "checkpoint_write_time":
+ p.mx.checkpointWriteTime = parseInt(value)
+ case "checkpoint_sync_time":
+ p.mx.checkpointSyncTime = parseInt(value)
+ case "buffers_checkpoint_bytes":
+ p.mx.buffersCheckpoint = parseInt(value)
+ case "buffers_clean_bytes":
+ p.mx.buffersClean = parseInt(value)
+ case "maxwritten_clean":
+ p.mx.maxwrittenClean = parseInt(value)
+ case "buffers_backend_bytes":
+ p.mx.buffersBackend = parseInt(value)
+ case "buffers_backend_fsync":
+ p.mx.buffersBackendFsync = parseInt(value)
+ case "buffers_alloc_bytes":
+ p.mx.buffersAlloc = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryUptime() error {
+ q := queryServerUptime()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return err
+ }
+
+ p.mx.uptime = parseFloat(s)
+
+ return nil
+}
+
+func (p *Postgres) doQueryTXIDWraparound() error {
+ q := queryTXIDWraparound()
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "oldest_current_xid":
+ p.mx.oldestXID = parseInt(value)
+ case "percent_towards_wraparound":
+ p.mx.percentTowardsWraparound = parseInt(value)
+ case "percent_towards_emergency_autovacuum":
+ p.mx.percentTowardsEmergencyAutovacuum = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryWALWrites() error {
+ q := queryWALWrites(p.pgVersion)
+
+ var v int64
+ if err := p.doQueryRow(q, &v); err != nil {
+ return err
+ }
+
+ p.mx.walWrites = v
+
+ return nil
+}
+
+func (p *Postgres) doQueryWALFiles() error {
+ q := queryWALFiles(p.pgVersion)
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "wal_recycled_files":
+ p.mx.walRecycledFiles = parseInt(value)
+ case "wal_written_files":
+ p.mx.walWrittenFiles = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryWALArchiveFiles() error {
+ q := queryWALArchiveFiles(p.pgVersion)
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "wal_archive_files_ready_count":
+ p.mx.walArchiveFilesReady = parseInt(value)
+ case "wal_archive_files_done_count":
+ p.mx.walArchiveFilesDone = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryCatalogRelations() error {
+ q := queryCatalogRelations()
+
+ var kind string
+ var count, size int64
+ return p.doQuery(q, func(column, value string, rowEnd bool) {
+ switch column {
+ case "relkind":
+ kind = value
+ case "count":
+ count = parseInt(value)
+ case "size":
+ size = parseInt(value)
+ }
+ if !rowEnd {
+ return
+ }
+ // https://www.postgresql.org/docs/current/catalog-pg-class.html
+ switch kind {
+ case "r":
+ p.mx.relkindOrdinaryTable = count
+ p.mx.relkindOrdinaryTableSize = size
+ case "i":
+ p.mx.relkindIndex = count
+ p.mx.relkindIndexSize = size
+ case "S":
+ p.mx.relkindSequence = count
+ p.mx.relkindSequenceSize = size
+ case "t":
+ p.mx.relkindTOASTTable = count
+ p.mx.relkindTOASTTableSize = size
+ case "v":
+ p.mx.relkindView = count
+ p.mx.relkindViewSize = size
+ case "m":
+ p.mx.relkindMatView = count
+ p.mx.relkindMatViewSize = size
+ case "c":
+ p.mx.relkindCompositeType = count
+ p.mx.relkindCompositeTypeSize = size
+ case "f":
+ p.mx.relkindForeignTable = count
+ p.mx.relkindForeignTableSize = size
+ case "p":
+ p.mx.relkindPartitionedTable = count
+ p.mx.relkindPartitionedTableSize = size
+ case "I":
+ p.mx.relkindPartitionedIndex = count
+ p.mx.relkindPartitionedIndexSize = size
+ }
+ })
+}
+
+func (p *Postgres) doQueryAutovacuumWorkers() error {
+ q := queryAutovacuumWorkers()
+
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "autovacuum_analyze":
+ p.mx.autovacuumWorkersAnalyze = parseInt(value)
+ case "autovacuum_vacuum_analyze":
+ p.mx.autovacuumWorkersVacuumAnalyze = parseInt(value)
+ case "autovacuum_vacuum":
+ p.mx.autovacuumWorkersVacuum = parseInt(value)
+ case "autovacuum_vacuum_freeze":
+ p.mx.autovacuumWorkersVacuumFreeze = parseInt(value)
+ case "autovacuum_brin_summarize":
+ p.mx.autovacuumWorkersBrinSummarize = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryXactQueryRunningTime() error {
+ q := queryXactQueryRunningTime()
+
+ var state string
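+	// Transaction running time is observed for every backend; query running time only for backends in the active state.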
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "state":
+ state = value
+ case "xact_running_time":
+ v, _ := strconv.ParseFloat(value, 64)
+ p.mx.xactTimeHist.Observe(v)
+ case "query_running_time":
+ if state == "active" {
+ v, _ := strconv.ParseFloat(value, 64)
+ p.mx.queryTimeHist.Observe(v)
+ }
+ }
+ })
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_indexes.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_indexes.go
new file mode 100644
index 000000000..f5eb15bb3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_indexes.go
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+)
+
+func (p *Postgres) doQueryIndexesMetrics() error {
+ if err := p.doQueryStatUserIndexes(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryStatUserIndexes() error {
+ if err := p.doDBQueryStatUserIndexes(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryStatUserIndexes(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryStatUserIndexes(db *sql.DB) error {
+ q := queryStatUserIndexes()
+
+ var dbname, schema, table, name string
+ return p.doDBQuery(db, q, func(column, value string, _ bool) {
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ table = value
+ case "indexrelname":
+ name = value
+ p.getIndexMetrics(name, table, dbname, schema).updated = true
+ case "parent_relname":
+ p.getIndexMetrics(name, table, dbname, schema).parentTable = value
+ case "idx_scan":
+ p.getIndexMetrics(name, table, dbname, schema).idxScan = parseInt(value)
+ case "idx_tup_read":
+ p.getIndexMetrics(name, table, dbname, schema).idxTupRead = parseInt(value)
+ case "idx_tup_fetch":
+ p.getIndexMetrics(name, table, dbname, schema).idxTupFetch = parseInt(value)
+ case "size":
+ p.getIndexMetrics(name, table, dbname, schema).size = parseInt(value)
+ }
+ })
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_misc.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_misc.go
new file mode 100644
index 000000000..a2299c8b4
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_misc.go
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+ "strconv"
+
+ "github.com/jackc/pgx/v4/stdlib"
+)
+
+func (p *Postgres) doQueryServerVersion() (int, error) {
+ q := queryServerVersion()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return 0, err
+ }
+
+ return strconv.Atoi(s)
+}
+
+func (p *Postgres) doQueryIsSuperUser() (bool, error) {
+ q := queryIsSuperUser()
+
+ var v bool
+ if err := p.doQueryRow(q, &v); err != nil {
+ return false, err
+ }
+
+ return v, nil
+}
+
+func (p *Postgres) doQueryPGIsInRecovery() (bool, error) {
+ q := queryPGIsInRecovery()
+
+ var v bool
+ if err := p.doQueryRow(q, &v); err != nil {
+ return false, err
+ }
+
+ return v, nil
+}
+
+func (p *Postgres) doQuerySettingsMaxConnections() (int64, error) {
+ q := querySettingsMaxConnections()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return 0, err
+ }
+
+ return strconv.ParseInt(s, 10, 64)
+}
+
+func (p *Postgres) doQuerySettingsMaxLocksHeld() (int64, error) {
+ q := querySettingsMaxLocksHeld()
+
+ var s string
+ if err := p.doQueryRow(q, &s); err != nil {
+ return 0, err
+ }
+
+ return strconv.ParseInt(s, 10, 64)
+}
+
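+// connErrMax caps consecutive connection errors per database; once reached, no further connection attempts are made.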
+const connErrMax = 3
+
+func (p *Postgres) doQueryQueryableDatabases() error {
+ q := queryQueryableDatabaseList()
+
+ var dbs []string
+ err := p.doQuery(q, func(_, value string, _ bool) {
+ if p.dbSr != nil && p.dbSr.MatchString(value) {
+ dbs = append(dbs, value)
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ seen := make(map[string]bool, len(dbs))
+
+ for _, dbname := range dbs {
+ seen[dbname] = true
+
+ conn, ok := p.dbConns[dbname]
+ if !ok {
+ conn = &dbConn{}
+ p.dbConns[dbname] = conn
+ }
+
+ if conn.db != nil || conn.connErrors >= connErrMax {
+ continue
+ }
+
+ db, connStr, err := p.openSecondaryConnection(dbname)
+ if err != nil {
+ p.Warning(err)
+ conn.connErrors++
+ continue
+ }
+
+ tables, err := p.doDBQueryUserTablesCount(db)
+ if err != nil {
+ p.Warning(err)
+ conn.connErrors++
+ _ = db.Close()
+ stdlib.UnregisterConnConfig(connStr)
+ continue
+ }
+
+ indexes, err := p.doDBQueryUserIndexesCount(db)
+ if err != nil {
+ p.Warning(err)
+ conn.connErrors++
+ _ = db.Close()
+ stdlib.UnregisterConnConfig(connStr)
+ continue
+ }
+
+ if (p.MaxDBTables != 0 && tables > p.MaxDBTables) || (p.MaxDBIndexes != 0 && indexes > p.MaxDBIndexes) {
+			p.Warningf("database '%s' has too many user tables (%d/%d) or indexes (%d/%d), skipping it",
+ dbname, tables, p.MaxDBTables, indexes, p.MaxDBIndexes)
+ conn.connErrors = connErrMax
+ _ = db.Close()
+ stdlib.UnregisterConnConfig(connStr)
+ continue
+ }
+
+ conn.db, conn.connStr = db, connStr
+ }
+
+ for dbname, conn := range p.dbConns {
+ if seen[dbname] {
+ continue
+ }
+ delete(p.dbConns, dbname)
+ if conn.connStr != "" {
+ stdlib.UnregisterConnConfig(conn.connStr)
+ }
+ if conn.db != nil {
+ _ = conn.db.Close()
+ }
+ }
+
+ return nil
+}
+
+func (p *Postgres) doDBQueryUserTablesCount(db *sql.DB) (int64, error) {
+ q := queryUserTablesCount()
+
+ var v string
+ if err := p.doDBQueryRow(db, q, &v); err != nil {
+ return 0, err
+ }
+
+ return parseInt(v), nil
+}
+
+func (p *Postgres) doDBQueryUserIndexesCount(db *sql.DB) (int64, error) {
+ q := queryUserIndexesCount()
+
+ var v string
+ if err := p.doDBQueryRow(db, q, &v); err != nil {
+ return 0, err
+ }
+
+ return parseInt(v), nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_replication.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_replication.go
new file mode 100644
index 000000000..22ff47003
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_replication.go
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "fmt"
+)
+
+func (p *Postgres) doQueryReplicationMetrics() error {
+ if err := p.doQueryReplStandbyAppWALDelta(); err != nil {
+ return fmt.Errorf("querying replication standby app wal delta error: %v", err)
+ }
+
+ if p.pgVersion >= pgVersion10 {
+ if err := p.doQueryReplStandbyAppWALLag(); err != nil {
+ return fmt.Errorf("querying replication standby app wal lag error: %v", err)
+ }
+ }
+
+ if p.pgVersion >= pgVersion10 && p.isSuperUser() {
+ if err := p.doQueryReplSlotFiles(); err != nil {
+ return fmt.Errorf("querying replication slot files error: %v", err)
+ }
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryReplStandbyAppWALDelta() error {
+ q := queryReplicationStandbyAppDelta(p.pgVersion)
+
+ var app string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "application_name":
+ app = value
+ p.getReplAppMetrics(app).updated = true
+ default:
+			// TODO: delta calculation was changed in https://github.com/netdata/go.d.plugin/pull/1039
+ // - 'replay_delta' (probably other deltas too?) can be negative
+ // - Also, WAL delta != WAL lag after that PR
+ v := parseInt(value)
+ if v < 0 {
+ v = 0
+ }
+ switch column {
+ case "sent_delta":
+ p.getReplAppMetrics(app).walSentDelta += v
+ case "write_delta":
+ p.getReplAppMetrics(app).walWriteDelta += v
+ case "flush_delta":
+ p.getReplAppMetrics(app).walFlushDelta += v
+ case "replay_delta":
+ p.getReplAppMetrics(app).walReplayDelta += v
+ }
+ }
+ })
+}
+
+func (p *Postgres) doQueryReplStandbyAppWALLag() error {
+ q := queryReplicationStandbyAppLag()
+
+ var app string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "application_name":
+ app = value
+ p.getReplAppMetrics(app).updated = true
+ case "write_lag":
+ p.getReplAppMetrics(app).walWriteLag += parseInt(value)
+ case "flush_lag":
+ p.getReplAppMetrics(app).walFlushLag += parseInt(value)
+ case "replay_lag":
+ p.getReplAppMetrics(app).walReplayLag += parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doQueryReplSlotFiles() error {
+ q := queryReplicationSlotFiles(p.pgVersion)
+
+ var slot string
+ return p.doQuery(q, func(column, value string, _ bool) {
+ switch column {
+ case "slot_name":
+ slot = value
+ p.getReplSlotMetrics(slot).updated = true
+ case "replslot_wal_keep":
+ p.getReplSlotMetrics(slot).walKeep += parseInt(value)
+ case "replslot_files":
+ p.getReplSlotMetrics(slot).files += parseInt(value)
+ }
+ })
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/do_query_tables.go b/src/go/collectors/go.d.plugin/modules/postgres/do_query_tables.go
new file mode 100644
index 000000000..5b3e2c71d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/do_query_tables.go
@@ -0,0 +1,147 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+ "strings"
+)
+
+func (p *Postgres) doQueryTablesMetrics() error {
+ if err := p.doQueryStatUserTable(); err != nil {
+ return err
+ }
+ if err := p.doQueryStatIOUserTables(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *Postgres) doQueryStatUserTable() error {
+ if err := p.doDBQueryStatUserTables(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryStatUserTables(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doQueryStatIOUserTables() error {
+ if err := p.doDBQueryStatIOUserTables(p.db); err != nil {
+ p.Warning(err)
+ }
+ for _, conn := range p.dbConns {
+ if conn.db == nil {
+ continue
+ }
+ if err := p.doDBQueryStatIOUserTables(conn.db); err != nil {
+ p.Warning(err)
+ }
+ }
+ return nil
+}
+
+func (p *Postgres) doDBQueryStatUserTables(db *sql.DB) error {
+ q := queryStatUserTables()
+
+ var dbname, schema, name string
+ return p.doDBQuery(db, q, func(column, value string, _ bool) {
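+		// Empty last_* timestamps mean the table was never (auto)vacuumed/analyzed; -1 is used as
+		// a sentinel so the corresponding charts are created only once a real value appears.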
+ if value == "" && strings.HasPrefix(column, "last_") {
+ value = "-1"
+ }
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ name = value
+ p.getTableMetrics(name, dbname, schema).updated = true
+ case "parent_relname":
+ p.getTableMetrics(name, dbname, schema).parentName = value
+ case "seq_scan":
+ p.getTableMetrics(name, dbname, schema).seqScan = parseInt(value)
+ case "seq_tup_read":
+ p.getTableMetrics(name, dbname, schema).seqTupRead = parseInt(value)
+ case "idx_scan":
+ p.getTableMetrics(name, dbname, schema).idxScan = parseInt(value)
+ case "idx_tup_fetch":
+ p.getTableMetrics(name, dbname, schema).idxTupFetch = parseInt(value)
+ case "n_tup_ins":
+ p.getTableMetrics(name, dbname, schema).nTupIns = parseInt(value)
+ case "n_tup_upd":
+ p.getTableMetrics(name, dbname, schema).nTupUpd.last = parseInt(value)
+ case "n_tup_del":
+ p.getTableMetrics(name, dbname, schema).nTupDel = parseInt(value)
+ case "n_tup_hot_upd":
+ p.getTableMetrics(name, dbname, schema).nTupHotUpd.last = parseInt(value)
+ case "n_live_tup":
+ p.getTableMetrics(name, dbname, schema).nLiveTup = parseInt(value)
+ case "n_dead_tup":
+ p.getTableMetrics(name, dbname, schema).nDeadTup = parseInt(value)
+ case "last_vacuum":
+ p.getTableMetrics(name, dbname, schema).lastVacuumAgo = parseFloat(value)
+ case "last_autovacuum":
+ p.getTableMetrics(name, dbname, schema).lastAutoVacuumAgo = parseFloat(value)
+ case "last_analyze":
+ p.getTableMetrics(name, dbname, schema).lastAnalyzeAgo = parseFloat(value)
+ case "last_autoanalyze":
+ p.getTableMetrics(name, dbname, schema).lastAutoAnalyzeAgo = parseFloat(value)
+ case "vacuum_count":
+ p.getTableMetrics(name, dbname, schema).vacuumCount = parseInt(value)
+ case "autovacuum_count":
+ p.getTableMetrics(name, dbname, schema).autovacuumCount = parseInt(value)
+ case "analyze_count":
+ p.getTableMetrics(name, dbname, schema).analyzeCount = parseInt(value)
+ case "autoanalyze_count":
+ p.getTableMetrics(name, dbname, schema).autoAnalyzeCount = parseInt(value)
+ case "total_relation_size":
+ p.getTableMetrics(name, dbname, schema).totalSize = parseInt(value)
+ }
+ })
+}
+
+func (p *Postgres) doDBQueryStatIOUserTables(db *sql.DB) error {
+ q := queryStatIOUserTables()
+
+ var dbname, schema, name string
+ return p.doDBQuery(db, q, func(column, value string, rowEnd bool) {
+ if value == "" && column != "parent_relname" {
+ value = "-1"
+ }
+ switch column {
+ case "datname":
+ dbname = value
+ case "schemaname":
+ schema = value
+ case "relname":
+ name = value
+ p.getTableMetrics(name, dbname, schema).updated = true
+ case "parent_relname":
+ p.getTableMetrics(name, dbname, schema).parentName = value
+ case "heap_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).heapBlksRead.last = parseInt(value)
+ case "heap_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).heapBlksHit.last = parseInt(value)
+ case "idx_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).idxBlksRead.last = parseInt(value)
+ case "idx_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).idxBlksHit.last = parseInt(value)
+ case "toast_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).toastBlksRead.last = parseInt(value)
+ case "toast_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).toastBlksHit.last = parseInt(value)
+ case "tidx_blks_read_bytes":
+ p.getTableMetrics(name, dbname, schema).tidxBlksRead.last = parseInt(value)
+ case "tidx_blks_hit_bytes":
+ p.getTableMetrics(name, dbname, schema).tidxBlksHit.last = parseInt(value)
+ }
+ })
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/init.go b/src/go/collectors/go.d.plugin/modules/postgres/init.go
new file mode 100644
index 000000000..315a876a7
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/init.go
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "errors"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/matcher"
+)
+
+func (p *Postgres) validateConfig() error {
+ if p.DSN == "" {
+ return errors.New("DSN not set")
+ }
+ return nil
+}
+
+func (p *Postgres) initDBSelector() (matcher.Matcher, error) {
+ if p.DBSelector == "" {
+ return nil, nil
+ }
+
+ return matcher.NewSimplePatternsMatcher(p.DBSelector)
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/integrations/postgresql.md b/src/go/collectors/go.d.plugin/modules/postgres/integrations/postgresql.md
new file mode 100644
index 000000000..2122f3d61
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/integrations/postgresql.md
@@ -0,0 +1,382 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/collectors/go.d.plugin/modules/postgres/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml"
+sidebar_label: "PostgreSQL"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: True
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# PostgreSQL
+
+
+<img src="https://netdata.cloud/img/postgres.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: postgres
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector monitors the activity and performance of Postgres servers, collects replication statistics, metrics for each database, table and index, and more.
+
+
+It establishes a connection to the Postgres instance via a TCP or UNIX socket.
+To collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.
+
+
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects instances running on localhost by trying to connect as root and netdata using known PostgreSQL TCP and UNIX sockets:
+
+- 127.0.0.1:5432
+- /var/run/postgresql/
+
+
+#### Limits
+
+Table and index metrics are not collected for databases with more than 50 tables or 250 indexes.
+These limits can be changed in the configuration file.
+
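+For example, a minimal `go.d/postgres.conf` job that lifts both limits might look like the following sketch (the DSN and values are illustrative):
+
+```yaml
+jobs:
+  - name: local
+    dsn: postgres://netdata:password@127.0.0.1:5432/postgres
+    max_db_tables: 0   # 0 means no table limit
+    max_db_indexes: 0  # 0 means no index limit
+```
+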
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per PostgreSQL instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.connections_utilization | used | percentage |
+| postgres.connections_usage | available, used | connections |
+| postgres.connections_state_count | active, idle, idle_in_transaction, idle_in_transaction_aborted, disabled | connections |
+| postgres.transactions_duration | a dimension per bucket | transactions/s |
+| postgres.queries_duration | a dimension per bucket | queries/s |
+| postgres.locks_utilization | used | percentage |
+| postgres.checkpoints_rate | scheduled, requested | checkpoints/s |
+| postgres.checkpoints_time | write, sync | milliseconds |
+| postgres.bgwriter_halts_rate | maxwritten | events/s |
+| postgres.buffers_io_rate | checkpoint, backend, bgwriter | B/s |
+| postgres.buffers_backend_fsync_rate | fsync | calls/s |
+| postgres.buffers_allocated_rate | allocated | B/s |
+| postgres.wal_io_rate | write | B/s |
+| postgres.wal_files_count | written, recycled | files |
+| postgres.wal_archiving_files_count | ready, done | files/s |
+| postgres.autovacuum_workers_count | analyze, vacuum_analyze, vacuum, vacuum_freeze, brin_summarize | workers |
+| postgres.txid_exhaustion_towards_autovacuum_perc | emergency_autovacuum | percentage |
+| postgres.txid_exhaustion_perc | txid_exhaustion | percentage |
+| postgres.txid_exhaustion_oldest_txid_num | xid | xid |
+| postgres.catalog_relations_count | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | relations |
+| postgres.catalog_relations_size | ordinary_table, index, sequence, toast_table, view, materialized_view, composite_type, foreign_table, partitioned_table, partitioned_index | B |
+| postgres.uptime | uptime | seconds |
+| postgres.databases_count | databases | databases |
+
+### Per repl application
+
+These metrics refer to the replication application.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| application | application name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.replication_app_wal_lag_size | sent_lag, write_lag, flush_lag, replay_lag | B |
+| postgres.replication_app_wal_lag_time | write_lag, flush_lag, replay_lag | seconds |
+
+### Per repl slot
+
+These metrics refer to the replication slot.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| slot | replication slot name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.replication_slot_files_count | wal_keep, pg_replslot_files | files |
+
+### Per database
+
+These metrics refer to the database.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.db_transactions_ratio | committed, rollback | percentage |
+| postgres.db_transactions_rate | committed, rollback | transactions/s |
+| postgres.db_connections_utilization | used | percentage |
+| postgres.db_connections_count | connections | connections |
+| postgres.db_cache_io_ratio | miss | percentage |
+| postgres.db_io_rate | memory, disk | B/s |
+| postgres.db_ops_fetched_rows_ratio | fetched | percentage |
+| postgres.db_ops_read_rows_rate | returned, fetched | rows/s |
+| postgres.db_ops_write_rows_rate | inserted, deleted, updated | rows/s |
+| postgres.db_conflicts_rate | conflicts | queries/s |
+| postgres.db_conflicts_reason_rate | tablespace, lock, snapshot, bufferpin, deadlock | queries/s |
+| postgres.db_deadlocks_rate | deadlocks | deadlocks/s |
+| postgres.db_locks_held_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |
+| postgres.db_locks_awaited_count | access_share, row_share, row_exclusive, share_update, share, share_row_exclusive, exclusive, access_exclusive | locks |
+| postgres.db_temp_files_created_rate | created | files/s |
+| postgres.db_temp_files_io_rate | written | B/s |
+| postgres.db_size | size | B |
+
+### Per table
+
+These metrics refer to the database table.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+| schema | schema name |
+| table | table name |
+| parent_table | parent table name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.table_rows_dead_ratio | dead | percentage |
+| postgres.table_rows_count | live, dead | rows |
+| postgres.table_ops_rows_rate | inserted, deleted, updated | rows/s |
+| postgres.table_ops_rows_hot_ratio | hot | percentage |
+| postgres.table_ops_rows_hot_rate | hot | rows/s |
+| postgres.table_cache_io_ratio | miss | percentage |
+| postgres.table_io_rate | memory, disk | B/s |
+| postgres.table_index_cache_io_ratio | miss | percentage |
+| postgres.table_index_io_rate | memory, disk | B/s |
+| postgres.table_toast_cache_io_ratio | miss | percentage |
+| postgres.table_toast_io_rate | memory, disk | B/s |
+| postgres.table_toast_index_cache_io_ratio | miss | percentage |
+| postgres.table_toast_index_io_rate | memory, disk | B/s |
+| postgres.table_scans_rate | index, sequential | scans/s |
+| postgres.table_scans_rows_rate | index, sequential | rows/s |
+| postgres.table_autovacuum_since_time | time | seconds |
+| postgres.table_vacuum_since_time | time | seconds |
+| postgres.table_autoanalyze_since_time | time | seconds |
+| postgres.table_analyze_since_time | time | seconds |
+| postgres.table_null_columns | null | columns |
+| postgres.table_size | size | B |
+| postgres.table_bloat_size_perc | bloat | percentage |
+| postgres.table_bloat_size | bloat | B |
+
+### Per index
+
+These metrics refer to the table index.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | database name |
+| schema | schema name |
+| table | table name |
+| parent_table | parent table name |
+| index | index name |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| postgres.index_size | size | B |
+| postgres.index_bloat_size_perc | bloat | percentage |
+| postgres.index_bloat_size | bloat | B |
+| postgres.index_usage_status | used, unused | status |
+
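+	// WAL file and archive statistics are only queried when connected as a superuser.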
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ postgres_total_connection_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.connections_utilization | average total connection utilization over the last minute |
+| [ postgres_acquired_locks_utilization ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.locks_utilization | average acquired locks utilization over the last minute |
+| [ postgres_txid_exhaustion_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.txid_exhaustion_perc | percent towards TXID wraparound |
+| [ postgres_db_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_cache_io_ratio | average cache hit ratio in db ${label:database} over the last minute |
+| [ postgres_db_transactions_rollback_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_transactions_ratio | average aborted transactions percentage in db ${label:database} over the last five minutes |
+| [ postgres_db_deadlocks_rate ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.db_deadlocks_rate | number of deadlocks detected in db ${label:database} in the last minute |
+| [ postgres_table_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_cache_io_ratio | average cache hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_index_cache_io_ratio | average index cache hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_toast_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_cache_io_ratio | average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_toast_index_cache_io_ratio ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_toast_index_cache_io_ratio | average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute |
+| [ postgres_table_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} |
+| [ postgres_table_last_autovacuum_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autovacuum_since_time | time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon |
+| [ postgres_table_last_autoanalyze_time ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.table_autoanalyze_since_time | time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon |
+| [ postgres_index_bloat_size_perc ](https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf) | postgres.index_bloat_size_perc | bloat size percentage in db ${label:database} table ${label:table} index ${label:index} |
+
+
+## Setup
+
+### Prerequisites
+
+#### Create netdata user
+
+Create a user granted the `pg_monitor`
+or `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).
+
+To create the `netdata` user with these permissions, execute the following in a psql session as a user with CREATEROLE privileges:
+
+```postgresql
+CREATE USER netdata;
+GRANT pg_monitor TO netdata;
+```
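+
+If you prefer the narrower `pg_read_all_stats` role mentioned above, the grant is analogous (a minimal sketch for the same `netdata` user):
+
+```postgresql
+GRANT pg_read_all_stats TO netdata;
+```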
+
+After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or
+the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your
+system.
+
+
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/postgres.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/postgres.conf
+```
+
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 5 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| dsn | Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING). | postgres://postgres:postgres@127.0.0.1:5432/postgres | yes |
+| timeout | Query timeout in seconds. | 2 | no |
+| collect_databases_matching | Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#simple-patterns-matcher). | | no |
+| max_db_tables | Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit. | 50 | no |
+| max_db_indexes | Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit. | 250 | no |
+
+</details>
+
+#### Examples
+
+##### TCP socket
+
+An example configuration.
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+
+```
+
+##### Unix socket
+
+An example configuration.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+
+ - name: remote
+ dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'
+
+```
+</details>
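+
+##### Database selector
+
+A hypothetical job that restricts per-database metrics with `collect_databases_matching`. The pattern below is an assumption for illustration: with simple patterns, the first matching pattern wins, so exclusions come first.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+  - name: local
+    dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+    collect_databases_matching: '!template* *'
+
+```
+</details>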
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `postgres` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m postgres
+ ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml b/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml
new file mode 100644
index 000000000..799dd6d0e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/metadata.yaml
@@ -0,0 +1,750 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-postgres
+ plugin_name: go.d.plugin
+ module_name: postgres
+ monitored_instance:
+ name: PostgreSQL
+ link: https://www.postgresql.org/
+ categories:
+ - data-collection.database-servers
+ icon_filename: postgres.svg
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ alternative_monitored_instances: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - db
+ - database
+ - postgres
+ - postgresql
+ - sql
+ most_popular: true
+ overview:
+ multi_instance: true
+ data_collection:
+ metrics_description: |
+          This collector monitors the activity and performance of Postgres servers and collects replication statistics, metrics for each database, table, and index, and more.
+ method_description: |
+ It establishes a connection to the Postgres instance via a TCP or UNIX socket.
+ To collect metrics for database tables and indexes, it establishes an additional connection for each discovered database.
+ default_behavior:
+ auto_detection:
+ description: |
+            By default, it detects instances running on localhost by trying to connect as the `root` and `netdata` users, using known PostgreSQL TCP and UNIX sockets:
+
+ - 127.0.0.1:5432
+ - /var/run/postgresql/
+ limits:
+ description: |
+ Table and index metrics are not collected for databases with more than 50 tables or 250 indexes.
+ These limits can be changed in the configuration file.
+ performance_impact:
+ description: ""
+ additional_permissions:
+ description: ""
+ supported_platforms:
+ include: []
+ exclude: []
+ setup:
+ prerequisites:
+ list:
+ - title: Create netdata user
+ description: |
+              Create a user granted the `pg_monitor`
+              or `pg_read_all_stats` [built-in role](https://www.postgresql.org/docs/current/predefined-roles.html).
+
+              To create the `netdata` user with these permissions, execute the following in a psql session as a user with CREATEROLE privileges:
+
+ ```postgresql
+ CREATE USER netdata;
+ GRANT pg_monitor TO netdata;
+ ```
+
+ After creating the new user, restart the Netdata agent with `sudo systemctl restart netdata`, or
+ the [appropriate method](/docs/netdata-agent/start-stop-restart.md) for your
+ system.
+ configuration:
+ file:
+ name: go.d/postgres.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: dsn
+ description: Postgres server DSN (Data Source Name). See [DSN syntax](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING).
+ default_value: postgres://postgres:postgres@127.0.0.1:5432/postgres
+ required: true
+ - name: timeout
+ description: Query timeout in seconds.
+ default_value: 2
+ required: false
+ - name: collect_databases_matching
+ description: Databases selector. Determines which database metrics will be collected. Syntax is [simple patterns](https://github.com/netdata/netdata/tree/master/src/go/collectors/go.d.plugin/pkg/matcher#simple-patterns-matcher).
+ default_value: ""
+ required: false
+ - name: max_db_tables
+ description: Maximum number of tables in the database. Table metrics will not be collected for databases that have more tables than max_db_tables. 0 means no limit.
+ default_value: 50
+ required: false
+ - name: max_db_indexes
+ description: Maximum number of indexes in the database. Index metrics will not be collected for databases that have more indexes than max_db_indexes. 0 means no limit.
+ default_value: 250
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: TCP socket
+ description: An example configuration.
+ folding:
+ enabled: false
+ config: |
+ jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+ - name: Unix socket
+ description: An example configuration.
+ config: |
+ jobs:
+ - name: local
+ dsn: 'host=/var/run/postgresql dbname=postgres user=netdata'
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ dsn: 'postgresql://netdata@127.0.0.1:5432/postgres'
+
+ - name: remote
+ dsn: 'postgresql://netdata@203.0.113.0:5432/postgres'
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: postgres_total_connection_utilization
+ metric: postgres.connections_utilization
+ info: average total connection utilization over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_acquired_locks_utilization
+ metric: postgres.locks_utilization
+ info: average acquired locks utilization over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_txid_exhaustion_perc
+ metric: postgres.txid_exhaustion_perc
+ info: percent towards TXID wraparound
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_db_cache_io_ratio
+ metric: postgres.db_cache_io_ratio
+ info: average cache hit ratio in db ${label:database} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_db_transactions_rollback_ratio
+        metric: postgres.db_transactions_ratio
+ info: average aborted transactions percentage in db ${label:database} over the last five minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_db_deadlocks_rate
+ metric: postgres.db_deadlocks_rate
+ info: number of deadlocks detected in db ${label:database} in the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_cache_io_ratio
+ metric: postgres.table_cache_io_ratio
+ info: average cache hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_index_cache_io_ratio
+ metric: postgres.table_index_cache_io_ratio
+ info: average index cache hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_toast_cache_io_ratio
+ metric: postgres.table_toast_cache_io_ratio
+ info: average TOAST hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_toast_index_cache_io_ratio
+ metric: postgres.table_toast_index_cache_io_ratio
+ info: average index TOAST hit ratio in db ${label:database} table ${label:table} over the last minute
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_bloat_size_perc
+ metric: postgres.table_bloat_size_perc
+ info: bloat size percentage in db ${label:database} table ${label:table}
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_last_autovacuum_time
+ metric: postgres.table_autovacuum_since_time
+ info: time elapsed since db ${label:database} table ${label:table} was vacuumed by the autovacuum daemon
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_table_last_autoanalyze_time
+ metric: postgres.table_autoanalyze_since_time
+ info: time elapsed since db ${label:database} table ${label:table} was analyzed by the autovacuum daemon
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ - name: postgres_index_bloat_size_perc
+ metric: postgres.index_bloat_size_perc
+ info: bloat size percentage in db ${label:database} table ${label:table} index ${label:index}
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/postgres.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: postgres.connections_utilization
+ description: Connections utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: postgres.connections_usage
+ description: Connections usage
+ unit: connections
+ chart_type: stacked
+ dimensions:
+ - name: available
+ - name: used
+ - name: postgres.connections_state_count
+ description: Connections in each state
+ unit: connections
+ chart_type: stacked
+ dimensions:
+ - name: active
+ - name: idle
+ - name: idle_in_transaction
+ - name: idle_in_transaction_aborted
+ - name: disabled
+ - name: postgres.transactions_duration
+ description: Observed transactions time
+ unit: transactions/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: postgres.queries_duration
+ description: Observed active queries time
+ unit: queries/s
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per bucket
+ - name: postgres.locks_utilization
+ description: Acquired locks utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: postgres.checkpoints_rate
+ description: Checkpoints
+ unit: checkpoints/s
+ chart_type: stacked
+ dimensions:
+ - name: scheduled
+ - name: requested
+ - name: postgres.checkpoints_time
+ description: Checkpoint time
+ unit: milliseconds
+ chart_type: stacked
+ dimensions:
+ - name: write
+ - name: sync
+ - name: postgres.bgwriter_halts_rate
+ description: Background writer scan halts
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: maxwritten
+ - name: postgres.buffers_io_rate
+ description: Buffers written rate
+ unit: B/s
+ chart_type: area
+ dimensions:
+ - name: checkpoint
+ - name: backend
+ - name: bgwriter
+ - name: postgres.buffers_backend_fsync_rate
+ description: Backend fsync calls
+ unit: calls/s
+ chart_type: line
+ dimensions:
+ - name: fsync
+ - name: postgres.buffers_allocated_rate
+ description: Buffers allocated
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: allocated
+ - name: postgres.wal_io_rate
+ description: Write-Ahead Log writes
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: postgres.wal_files_count
+ description: Write-Ahead Log files
+ unit: files
+ chart_type: stacked
+ dimensions:
+ - name: written
+ - name: recycled
+ - name: postgres.wal_archiving_files_count
+ description: Write-Ahead Log archived files
+ unit: files/s
+ chart_type: stacked
+ dimensions:
+ - name: ready
+ - name: done
+ - name: postgres.autovacuum_workers_count
+ description: Autovacuum workers
+ unit: workers
+ chart_type: line
+ dimensions:
+ - name: analyze
+ - name: vacuum_analyze
+ - name: vacuum
+ - name: vacuum_freeze
+ - name: brin_summarize
+ - name: postgres.txid_exhaustion_towards_autovacuum_perc
+ description: Percent towards emergency autovacuum
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: emergency_autovacuum
+ - name: postgres.txid_exhaustion_perc
+ description: Percent towards transaction ID wraparound
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: txid_exhaustion
+ - name: postgres.txid_exhaustion_oldest_txid_num
+ description: Oldest transaction XID
+              unit: xid
+              chart_type: line
+ dimensions:
+ - name: xid
+ - name: postgres.catalog_relations_count
+ description: Relation count
+ unit: relations
+ chart_type: stacked
+ dimensions:
+ - name: ordinary_table
+ - name: index
+ - name: sequence
+ - name: toast_table
+ - name: view
+ - name: materialized_view
+ - name: composite_type
+ - name: foreign_table
+ - name: partitioned_table
+ - name: partitioned_index
+ - name: postgres.catalog_relations_size
+ description: Relation size
+ unit: B
+ chart_type: stacked
+ dimensions:
+ - name: ordinary_table
+ - name: index
+ - name: sequence
+ - name: toast_table
+ - name: view
+ - name: materialized_view
+ - name: composite_type
+ - name: foreign_table
+ - name: partitioned_table
+ - name: partitioned_index
+ - name: postgres.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: postgres.databases_count
+ description: Number of databases
+ unit: databases
+ chart_type: line
+ dimensions:
+ - name: databases
+ - name: repl application
+ description: These metrics refer to the replication application.
+ labels:
+ - name: application
+ description: application name
+ metrics:
+ - name: postgres.replication_app_wal_lag_size
+ description: Standby application WAL lag size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: sent_lag
+ - name: write_lag
+ - name: flush_lag
+ - name: replay_lag
+ - name: postgres.replication_app_wal_lag_time
+ description: Standby application WAL lag time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: write_lag
+ - name: flush_lag
+ - name: replay_lag
+ - name: repl slot
+ description: These metrics refer to the replication slot.
+ labels:
+ - name: slot
+ description: replication slot name
+ metrics:
+ - name: postgres.replication_slot_files_count
+ description: Replication slot files
+ unit: files
+ chart_type: line
+ dimensions:
+ - name: wal_keep
+ - name: pg_replslot_files
+ - name: database
+ description: These metrics refer to the database.
+ labels:
+ - name: database
+ description: database name
+ metrics:
+ - name: postgres.db_transactions_ratio
+ description: Database transactions ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: committed
+ - name: rollback
+ - name: postgres.db_transactions_rate
+ description: Database transactions
+ unit: transactions/s
+ chart_type: line
+ dimensions:
+ - name: committed
+ - name: rollback
+ - name: postgres.db_connections_utilization
+ description: Database connections utilization
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: postgres.db_connections_count
+ description: Database connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: connections
+ - name: postgres.db_cache_io_ratio
+ description: Database buffer cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.db_io_rate
+ description: Database reads
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.db_ops_fetched_rows_ratio
+ description: Database rows fetched ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: fetched
+ - name: postgres.db_ops_read_rows_rate
+ description: Database rows read
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: returned
+ - name: fetched
+ - name: postgres.db_ops_write_rows_rate
+ description: Database rows written
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: deleted
+ - name: updated
+ - name: postgres.db_conflicts_rate
+ description: Database canceled queries
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: conflicts
+ - name: postgres.db_conflicts_reason_rate
+ description: Database canceled queries by reason
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: tablespace
+ - name: lock
+ - name: snapshot
+ - name: bufferpin
+ - name: deadlock
+ - name: postgres.db_deadlocks_rate
+ description: Database deadlocks
+ unit: deadlocks/s
+ chart_type: line
+ dimensions:
+ - name: deadlocks
+ - name: postgres.db_locks_held_count
+ description: Database locks held
+ unit: locks
+ chart_type: stacked
+ dimensions:
+ - name: access_share
+ - name: row_share
+ - name: row_exclusive
+ - name: share_update
+ - name: share
+ - name: share_row_exclusive
+ - name: exclusive
+ - name: access_exclusive
+ - name: postgres.db_locks_awaited_count
+ description: Database locks awaited
+ unit: locks
+ chart_type: stacked
+ dimensions:
+ - name: access_share
+ - name: row_share
+ - name: row_exclusive
+ - name: share_update
+ - name: share
+ - name: share_row_exclusive
+ - name: exclusive
+ - name: access_exclusive
+ - name: postgres.db_temp_files_created_rate
+ description: Database created temporary files
+ unit: files/s
+ chart_type: line
+ dimensions:
+ - name: created
+ - name: postgres.db_temp_files_io_rate
+ description: Database temporary files data written to disk
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: written
+ - name: postgres.db_size
+ description: Database size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: table
+ description: These metrics refer to the database table.
+ labels:
+ - name: database
+ description: database name
+ - name: schema
+ description: schema name
+ - name: table
+ description: table name
+ - name: parent_table
+ description: parent table name
+ metrics:
+ - name: postgres.table_rows_dead_ratio
+ description: Table dead rows
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: dead
+ - name: postgres.table_rows_count
+ description: Table total rows
+ unit: rows
+ chart_type: line
+ dimensions:
+ - name: live
+ - name: dead
+ - name: postgres.table_ops_rows_rate
+ description: Table throughput
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: deleted
+ - name: updated
+ - name: postgres.table_ops_rows_hot_ratio
+ description: Table HOT updates ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: hot
+ - name: postgres.table_ops_rows_hot_rate
+ description: Table HOT updates
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: hot
+ - name: postgres.table_cache_io_ratio
+ description: Table I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_io_rate
+ description: Table I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_index_cache_io_ratio
+ description: Table index I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_index_io_rate
+ description: Table index I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_toast_cache_io_ratio
+ description: Table TOAST I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_toast_io_rate
+ description: Table TOAST I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_toast_index_cache_io_ratio
+ description: Table TOAST index I/O cache miss ratio
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: postgres.table_toast_index_io_rate
+ description: Table TOAST index I/O
+ unit: B/s
+ chart_type: line
+ dimensions:
+ - name: memory
+ - name: disk
+ - name: postgres.table_scans_rate
+ description: Table scans
+ unit: scans/s
+ chart_type: line
+ dimensions:
+ - name: index
+ - name: sequential
+ - name: postgres.table_scans_rows_rate
+ description: Table live rows fetched by scans
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: index
+ - name: sequential
+ - name: postgres.table_autovacuum_since_time
+ description: Table time since last auto VACUUM
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_vacuum_since_time
+ description: Table time since last manual VACUUM
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_autoanalyze_since_time
+ description: Table time since last auto ANALYZE
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_analyze_since_time
+ description: Table time since last manual ANALYZE
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: time
+ - name: postgres.table_null_columns
+ description: Table null columns
+ unit: columns
+ chart_type: line
+ dimensions:
+ - name: "null"
+ - name: postgres.table_size
+ description: Table total size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: postgres.table_bloat_size_perc
+ description: Table bloat size percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: postgres.table_bloat_size
+ description: Table bloat size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: index
+ description: These metrics refer to the table index.
+ labels:
+ - name: database
+ description: database name
+ - name: schema
+ description: schema name
+ - name: table
+ description: table name
+ - name: parent_table
+ description: parent table name
+ - name: index
+ description: index name
+ metrics:
+ - name: postgres.index_size
+ description: Index size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: size
+ - name: postgres.index_bloat_size_perc
+ description: Index bloat size percentage
+ unit: percentage
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: postgres.index_bloat_size
+ description: Index bloat size
+ unit: B
+ chart_type: line
+ dimensions:
+ - name: bloat
+ - name: postgres.index_usage_status
+ description: Index usage status
+ unit: status
+ chart_type: line
+ dimensions:
+ - name: used
+ - name: unused
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/metrics.go b/src/go/collectors/go.d.plugin/modules/postgres/metrics.go
new file mode 100644
index 000000000..b60fbdf8a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/metrics.go
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import "github.com/netdata/netdata/go/go.d.plugin/pkg/metrics"
+
+type pgMetrics struct {
+ srvMetrics
+ dbs map[string]*dbMetrics
+ tables map[string]*tableMetrics
+ indexes map[string]*indexMetrics
+ replApps map[string]*replStandbyAppMetrics
+ replSlots map[string]*replSlotMetrics
+}
+
+type srvMetrics struct {
+ xactTimeHist metrics.Histogram
+ queryTimeHist metrics.Histogram
+
+ maxConnections int64
+ maxLocksHeld int64
+
+ uptime int64
+
+ relkindOrdinaryTable int64
+ relkindIndex int64
+ relkindSequence int64
+ relkindTOASTTable int64
+ relkindView int64
+ relkindMatView int64
+ relkindCompositeType int64
+ relkindForeignTable int64
+ relkindPartitionedTable int64
+ relkindPartitionedIndex int64
+ relkindOrdinaryTableSize int64
+ relkindIndexSize int64
+ relkindSequenceSize int64
+ relkindTOASTTableSize int64
+ relkindViewSize int64
+ relkindMatViewSize int64
+ relkindCompositeTypeSize int64
+ relkindForeignTableSize int64
+ relkindPartitionedTableSize int64
+ relkindPartitionedIndexSize int64
+
+ connUsed int64
+ connStateActive int64
+ connStateIdle int64
+ connStateIdleInTrans int64
+ connStateIdleInTransAborted int64
+ connStateFastpathFunctionCall int64
+ connStateDisabled int64
+
+ checkpointsTimed int64
+ checkpointsReq int64
+ checkpointWriteTime int64
+ checkpointSyncTime int64
+ buffersCheckpoint int64
+ buffersClean int64
+ maxwrittenClean int64
+ buffersBackend int64
+ buffersBackendFsync int64
+ buffersAlloc int64
+
+ oldestXID int64
+ percentTowardsWraparound int64
+ percentTowardsEmergencyAutovacuum int64
+
+ walWrites int64
+ walRecycledFiles int64
+ walWrittenFiles int64
+ walArchiveFilesReady int64
+ walArchiveFilesDone int64
+
+ autovacuumWorkersAnalyze int64
+ autovacuumWorkersVacuumAnalyze int64
+ autovacuumWorkersVacuum int64
+ autovacuumWorkersVacuumFreeze int64
+ autovacuumWorkersBrinSummarize int64
+}
+
+type dbMetrics struct {
+ name string
+
+ updated bool
+ hasCharts bool
+
+ numBackends int64
+ datConnLimit int64
+ xactCommit int64
+ xactRollback int64
+ blksRead incDelta
+ blksHit incDelta
+ tupReturned incDelta
+ tupFetched incDelta
+ tupInserted int64
+ tupUpdated int64
+ tupDeleted int64
+ conflicts int64
+ tempFiles int64
+ tempBytes int64
+ deadlocks int64
+
+ size *int64 // need 'connect' privilege for pg_database_size()
+
+ conflTablespace int64
+ conflLock int64
+ conflSnapshot int64
+ conflBufferpin int64
+ conflDeadlock int64
+
+ accessShareLockHeld int64
+ rowShareLockHeld int64
+ rowExclusiveLockHeld int64
+ shareUpdateExclusiveLockHeld int64
+ shareLockHeld int64
+ shareRowExclusiveLockHeld int64
+ exclusiveLockHeld int64
+ accessExclusiveLockHeld int64
+ accessShareLockAwaited int64
+ rowShareLockAwaited int64
+ rowExclusiveLockAwaited int64
+ shareUpdateExclusiveLockAwaited int64
+ shareLockAwaited int64
+ shareRowExclusiveLockAwaited int64
+ exclusiveLockAwaited int64
+ accessExclusiveLockAwaited int64
+}
+
+type replStandbyAppMetrics struct {
+ name string
+
+ updated bool
+ hasCharts bool
+
+ walSentDelta int64
+ walWriteDelta int64
+ walFlushDelta int64
+ walReplayDelta int64
+
+ walWriteLag int64
+ walFlushLag int64
+ walReplayLag int64
+}
+
+type replSlotMetrics struct {
+ name string
+
+ updated bool
+ hasCharts bool
+
+ walKeep int64
+ files int64
+}
+
+type tableMetrics struct {
+ name string
+ parentName string
+ db string
+ schema string
+
+ updated bool
+ hasCharts bool
+ hasLastAutoVacuumChart bool
+ hasLastVacuumChart bool
+ hasLastAutoAnalyzeChart bool
+ hasLastAnalyzeChart bool
+ hasTableIOCharts bool
+ hasTableIdxIOCharts bool
+ hasTableTOASTIOCharts bool
+ hasTableTOASTIdxIOCharts bool
+
+ // pg_stat_user_tables
+ seqScan int64
+ seqTupRead int64
+ idxScan int64
+ idxTupFetch int64
+ nTupIns int64
+ nTupUpd incDelta
+ nTupDel int64
+ nTupHotUpd incDelta
+ nLiveTup int64
+ nDeadTup int64
+ lastVacuumAgo int64
+ lastAutoVacuumAgo int64
+ lastAnalyzeAgo int64
+ lastAutoAnalyzeAgo int64
+ vacuumCount int64
+ autovacuumCount int64
+ analyzeCount int64
+ autoAnalyzeCount int64
+
+ // pg_statio_user_tables
+ heapBlksRead incDelta
+ heapBlksHit incDelta
+ idxBlksRead incDelta
+ idxBlksHit incDelta
+ toastBlksRead incDelta
+ toastBlksHit incDelta
+ tidxBlksRead incDelta
+ tidxBlksHit incDelta
+
+ totalSize int64
+
+ bloatSize *int64 // need 'SELECT' access to the table
+ bloatSizePerc *int64 // need 'SELECT' access to the table
+ nullColumns *int64 // need 'SELECT' access to the table
+}
+
+type indexMetrics struct {
+ name string
+ db string
+ schema string
+ table string
+ parentTable string
+
+ updated bool
+ hasCharts bool
+
+ idxScan int64
+ idxTupRead int64
+ idxTupFetch int64
+
+ size int64
+
+ bloatSize *int64 // need 'SELECT' access to the table
+ bloatSizePerc *int64 // need 'SELECT' access to the table
+}
+type incDelta struct{ prev, last int64 }
+
+func (pc *incDelta) delta() int64 { return pc.last - pc.prev }
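+
+// Example (an illustrative sketch, not used by the collector): an incDelta
+// carries a cumulative counter across collection cycles, so delta() yields
+// the per-interval increment. The variable name and values are hypothetical.
+//
+//	var blksRead incDelta
+//	blksRead.prev, blksRead.last = blksRead.last, 150 // first cycle: counter = 150
+//	blksRead.prev, blksRead.last = blksRead.last, 175 // second cycle: counter = 175
+//	_ = blksRead.delta()                              // 25 reads since the last cycle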
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/postgres.go b/src/go/collectors/go.d.plugin/modules/postgres/postgres.go
new file mode 100644
index 000000000..7928c0b6d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/postgres.go
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "database/sql"
+ _ "embed"
+ "errors"
+ "sync"
+ "time"
+
+ "github.com/netdata/netdata/go/go.d.plugin/agent/module"
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/matcher"
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/metrics"
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+
+ "github.com/jackc/pgx/v4/stdlib"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("postgres", module.Creator{
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ })
+}
+
+func New() *Postgres {
+ return &Postgres{
+ Config: Config{
+ Timeout: web.Duration(time.Second * 2),
+ DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres",
+ XactTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10},
+ QueryTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10},
+ // charts: 20 x table, 4 x index.
+ // https://discord.com/channels/847502280503590932/1022693928874549368
+ MaxDBTables: 50,
+ MaxDBIndexes: 250,
+ },
+ charts: baseCharts.Copy(),
+ dbConns: make(map[string]*dbConn),
+ mx: &pgMetrics{
+ dbs: make(map[string]*dbMetrics),
+ indexes: make(map[string]*indexMetrics),
+ tables: make(map[string]*tableMetrics),
+ replApps: make(map[string]*replStandbyAppMetrics),
+ replSlots: make(map[string]*replSlotMetrics),
+ },
+ recheckSettingsEvery: time.Minute * 30,
+ doSlowEvery: time.Minute * 5,
+ addXactQueryRunningTimeChartsOnce: &sync.Once{},
+ addWALFilesChartsOnce: &sync.Once{},
+ }
+}
+
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ Timeout web.Duration `yaml:"timeout,omitempty" json:"timeout"`
+ DBSelector string `yaml:"collect_databases_matching,omitempty" json:"collect_databases_matching"`
+ XactTimeHistogram []float64 `yaml:"transaction_time_histogram,omitempty" json:"transaction_time_histogram"`
+ QueryTimeHistogram []float64 `yaml:"query_time_histogram,omitempty" json:"query_time_histogram"`
+ MaxDBTables int64 `yaml:"max_db_tables" json:"max_db_tables"`
+ MaxDBIndexes int64 `yaml:"max_db_indexes" json:"max_db_indexes"`
+}
+
+type (
+ Postgres struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addXactQueryRunningTimeChartsOnce *sync.Once
+ addWALFilesChartsOnce *sync.Once
+
+ db *sql.DB
+ dbConns map[string]*dbConn
+
+ superUser *bool
+ pgIsInRecovery *bool
+ pgVersion int
+ dbSr matcher.Matcher
+ recheckSettingsTime time.Time
+ recheckSettingsEvery time.Duration
+ doSlowTime time.Time
+ doSlowEvery time.Duration
+
+ mx *pgMetrics
+ }
+ dbConn struct {
+ db *sql.DB
+ connStr string
+ connErrors int
+ }
+)
+
+func (p *Postgres) Configuration() any {
+ return p.Config
+}
+
+func (p *Postgres) Init() error {
+ err := p.validateConfig()
+ if err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+
+ sr, err := p.initDBSelector()
+ if err != nil {
+ p.Errorf("config validation: %v", err)
+ return err
+ }
+ p.dbSr = sr
+
+ p.mx.xactTimeHist = metrics.NewHistogramWithRangeBuckets(p.XactTimeHistogram)
+ p.mx.queryTimeHist = metrics.NewHistogramWithRangeBuckets(p.QueryTimeHistogram)
+
+ return nil
+}
+
+func (p *Postgres) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
+
+func (p *Postgres) Charts() *module.Charts {
+ return p.charts
+}
+
+func (p *Postgres) Collect() map[string]int64 {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+ return mx
+}
+
+func (p *Postgres) Cleanup() {
+ if p.db == nil {
+ return
+ }
+ if err := p.db.Close(); err != nil {
+ p.Warningf("cleanup: error on closing the Postgres database [%s]: %v", p.DSN, err)
+ }
+ p.db = nil
+
+ for dbname, conn := range p.dbConns {
+ delete(p.dbConns, dbname)
+ if conn.connStr != "" {
+ stdlib.UnregisterConnConfig(conn.connStr)
+ }
+ if conn.db != nil {
+ _ = conn.db.Close()
+ }
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/postgres_test.go b/src/go/collectors/go.d.plugin/modules/postgres/postgres_test.go
new file mode 100644
index 000000000..051f9c38d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/postgres_test.go
@@ -0,0 +1,731 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+import (
+ "bufio"
+ "bytes"
+ "database/sql/driver"
+ "errors"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/netdata/netdata/go/go.d.plugin/agent/module"
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/matcher"
+
+ "github.com/DATA-DOG/go-sqlmock"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer140004ServerVersionNum, _ = os.ReadFile("testdata/v14.4/server_version_num.txt")
+ dataVer140004IsSuperUserFalse, _ = os.ReadFile("testdata/v14.4/is_super_user-false.txt")
+ dataVer140004IsSuperUserTrue, _ = os.ReadFile("testdata/v14.4/is_super_user-true.txt")
+ dataVer140004PGIsInRecoveryTrue, _ = os.ReadFile("testdata/v14.4/pg_is_in_recovery-true.txt")
+ dataVer140004SettingsMaxConnections, _ = os.ReadFile("testdata/v14.4/settings_max_connections.txt")
+ dataVer140004SettingsMaxLocksHeld, _ = os.ReadFile("testdata/v14.4/settings_max_locks_held.txt")
+ dataVer140004ServerCurrentConnections, _ = os.ReadFile("testdata/v14.4/server_current_connections.txt")
+ dataVer140004ServerConnectionsState, _ = os.ReadFile("testdata/v14.4/server_connections_state.txt")
+ dataVer140004Checkpoints, _ = os.ReadFile("testdata/v14.4/checkpoints.txt")
+ dataVer140004ServerUptime, _ = os.ReadFile("testdata/v14.4/uptime.txt")
+ dataVer140004TXIDWraparound, _ = os.ReadFile("testdata/v14.4/txid_wraparound.txt")
+ dataVer140004WALWrites, _ = os.ReadFile("testdata/v14.4/wal_writes.txt")
+ dataVer140004WALFiles, _ = os.ReadFile("testdata/v14.4/wal_files.txt")
+ dataVer140004WALArchiveFiles, _ = os.ReadFile("testdata/v14.4/wal_archive_files.txt")
+ dataVer140004CatalogRelations, _ = os.ReadFile("testdata/v14.4/catalog_relations.txt")
+ dataVer140004AutovacuumWorkers, _ = os.ReadFile("testdata/v14.4/autovacuum_workers.txt")
+ dataVer140004XactQueryRunningTime, _ = os.ReadFile("testdata/v14.4/xact_query_running_time.txt")
+ dataVer140004ReplStandbyAppDelta, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_delta.txt")
+ dataVer140004ReplStandbyAppLag, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_lag.txt")
+ dataVer140004ReplSlotFiles, _ = os.ReadFile("testdata/v14.4/replication_slot_files.txt")
+ dataVer140004DatabaseStats, _ = os.ReadFile("testdata/v14.4/database_stats.txt")
+ dataVer140004DatabaseSize, _ = os.ReadFile("testdata/v14.4/database_size.txt")
+ dataVer140004DatabaseConflicts, _ = os.ReadFile("testdata/v14.4/database_conflicts.txt")
+ dataVer140004DatabaseLocks, _ = os.ReadFile("testdata/v14.4/database_locks.txt")
+ dataVer140004QueryableDatabaseList, _ = os.ReadFile("testdata/v14.4/queryable_database_list.txt")
+ dataVer140004StatUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_tables_db_postgres.txt")
+ dataVer140004StatIOUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/statio_user_tables_db_postgres.txt")
+ dataVer140004StatUserIndexesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_indexes_db_postgres.txt")
+ dataVer140004Bloat, _ = os.ReadFile("testdata/v14.4/bloat_tables.txt")
+ dataVer140004ColumnsStats, _ = os.ReadFile("testdata/v14.4/table_columns_stats.txt")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer140004ServerVersionNum": dataVer140004ServerVersionNum,
+ "dataVer140004IsSuperUserFalse": dataVer140004IsSuperUserFalse,
+ "dataVer140004IsSuperUserTrue": dataVer140004IsSuperUserTrue,
+ "dataVer140004PGIsInRecoveryTrue": dataVer140004PGIsInRecoveryTrue,
+ "dataVer140004SettingsMaxConnections": dataVer140004SettingsMaxConnections,
+ "dataVer140004SettingsMaxLocksHeld": dataVer140004SettingsMaxLocksHeld,
+ "dataVer140004ServerCurrentConnections": dataVer140004ServerCurrentConnections,
+ "dataVer140004ServerConnectionsState": dataVer140004ServerConnectionsState,
+ "dataVer140004Checkpoints": dataVer140004Checkpoints,
+ "dataVer140004ServerUptime": dataVer140004ServerUptime,
+ "dataVer140004TXIDWraparound": dataVer140004TXIDWraparound,
+ "dataVer140004WALWrites": dataVer140004WALWrites,
+ "dataVer140004WALFiles": dataVer140004WALFiles,
+ "dataVer140004WALArchiveFiles": dataVer140004WALArchiveFiles,
+ "dataVer140004CatalogRelations": dataVer140004CatalogRelations,
+ "dataVer140004AutovacuumWorkers": dataVer140004AutovacuumWorkers,
+ "dataVer140004XactQueryRunningTime": dataVer140004XactQueryRunningTime,
+		"dataVer140004ReplStandbyAppDelta":        dataVer140004ReplStandbyAppDelta,
+		"dataVer140004ReplStandbyAppLag":          dataVer140004ReplStandbyAppLag,
+ "dataVer140004ReplSlotFiles": dataVer140004ReplSlotFiles,
+ "dataVer140004DatabaseStats": dataVer140004DatabaseStats,
+ "dataVer140004DatabaseSize": dataVer140004DatabaseSize,
+ "dataVer140004DatabaseConflicts": dataVer140004DatabaseConflicts,
+ "dataVer140004DatabaseLocks": dataVer140004DatabaseLocks,
+ "dataVer140004QueryableDatabaseList": dataVer140004QueryableDatabaseList,
+ "dataVer140004StatUserTablesDBPostgres": dataVer140004StatUserTablesDBPostgres,
+ "dataVer140004StatIOUserTablesDBPostgres": dataVer140004StatIOUserTablesDBPostgres,
+ "dataVer140004StatUserIndexesDBPostgres": dataVer140004StatUserIndexesDBPostgres,
+ "dataVer140004Bloat": dataVer140004Bloat,
+ "dataVer140004ColumnsStats": dataVer140004ColumnsStats,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPostgres_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Postgres{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestPostgres_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "Success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "Fail when DSN not set": {
+ wantFail: true,
+ config: Config{DSN: ""},
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ pg := New()
+ pg.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, pg.Init())
+ } else {
+ assert.NoError(t, pg.Init())
+ }
+ })
+ }
+}
+
+func TestPostgres_Cleanup(t *testing.T) {
+
+}
+
+func TestPostgres_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestPostgres_Check(t *testing.T) {
+ tests := map[string]struct {
+ prepareMock func(t *testing.T, pg *Postgres, mock sqlmock.Sqlmock)
+ wantFail bool
+ }{
+ "Success when all queries are successful (v14.4)": {
+ wantFail: false,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ pg.dbSr = matcher.TRUE()
+
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpect(t, m, queryServerConnectionsState(), dataVer140004ServerConnectionsState)
+ mockExpect(t, m, queryCheckpoints(), dataVer140004Checkpoints)
+ mockExpect(t, m, queryServerUptime(), dataVer140004ServerUptime)
+ mockExpect(t, m, queryTXIDWraparound(), dataVer140004TXIDWraparound)
+ mockExpect(t, m, queryWALWrites(140004), dataVer140004WALWrites)
+ mockExpect(t, m, queryCatalogRelations(), dataVer140004CatalogRelations)
+ mockExpect(t, m, queryAutovacuumWorkers(), dataVer140004AutovacuumWorkers)
+ mockExpect(t, m, queryXactQueryRunningTime(), dataVer140004XactQueryRunningTime)
+
+ mockExpect(t, m, queryWALFiles(140004), dataVer140004WALFiles)
+ mockExpect(t, m, queryWALArchiveFiles(140004), dataVer140004WALArchiveFiles)
+
+ mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataVer140004ReplStandbyAppDelta)
+ mockExpect(t, m, queryReplicationStandbyAppLag(), dataVer140004ReplStandbyAppLag)
+ mockExpect(t, m, queryReplicationSlotFiles(140004), dataVer140004ReplSlotFiles)
+
+ mockExpect(t, m, queryDatabaseStats(), dataVer140004DatabaseStats)
+ mockExpect(t, m, queryDatabaseSize(140004), dataVer140004DatabaseSize)
+ mockExpect(t, m, queryDatabaseConflicts(), dataVer140004DatabaseConflicts)
+ mockExpect(t, m, queryDatabaseLocks(), dataVer140004DatabaseLocks)
+
+ mockExpect(t, m, queryQueryableDatabaseList(), dataVer140004QueryableDatabaseList)
+ mockExpect(t, m, queryStatUserTables(), dataVer140004StatUserTablesDBPostgres)
+ mockExpect(t, m, queryStatIOUserTables(), dataVer140004StatIOUserTablesDBPostgres)
+ mockExpect(t, m, queryStatUserIndexes(), dataVer140004StatUserIndexesDBPostgres)
+ mockExpect(t, m, queryBloat(), dataVer140004Bloat)
+ mockExpect(t, m, queryColumnsStats(), dataVer140004ColumnsStats)
+ },
+ },
+		"Fail when the second query is unsuccessful (v14.4)": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpectErr(m, queryServerConnectionsState())
+ },
+ },
+ "Fail when querying the database version returns an error": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpectErr(m, queryServerVersion())
+ },
+ },
+ "Fail when querying settings max connection returns an error": {
+ wantFail: true,
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpectErr(m, querySettingsMaxConnections())
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ pg := New()
+ pg.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, pg.Init())
+
+ test.prepareMock(t, pg, mock)
+
+ if test.wantFail {
+ assert.Error(t, pg.Check())
+ } else {
+ assert.NoError(t, pg.Check())
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func TestPostgres_Collect(t *testing.T) {
+ type testCaseStep struct {
+ prepareMock func(t *testing.T, pg *Postgres, mock sqlmock.Sqlmock)
+ check func(t *testing.T, pg *Postgres)
+ }
+ tests := map[string][]testCaseStep{
+ "Success on all queries, collect all dbs (v14.4)": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ pg.dbSr = matcher.TRUE()
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpect(t, m, queryServerConnectionsState(), dataVer140004ServerConnectionsState)
+ mockExpect(t, m, queryCheckpoints(), dataVer140004Checkpoints)
+ mockExpect(t, m, queryServerUptime(), dataVer140004ServerUptime)
+ mockExpect(t, m, queryTXIDWraparound(), dataVer140004TXIDWraparound)
+ mockExpect(t, m, queryWALWrites(140004), dataVer140004WALWrites)
+ mockExpect(t, m, queryCatalogRelations(), dataVer140004CatalogRelations)
+ mockExpect(t, m, queryAutovacuumWorkers(), dataVer140004AutovacuumWorkers)
+ mockExpect(t, m, queryXactQueryRunningTime(), dataVer140004XactQueryRunningTime)
+
+ mockExpect(t, m, queryWALFiles(140004), dataVer140004WALFiles)
+ mockExpect(t, m, queryWALArchiveFiles(140004), dataVer140004WALArchiveFiles)
+
+ mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataVer140004ReplStandbyAppDelta)
+ mockExpect(t, m, queryReplicationStandbyAppLag(), dataVer140004ReplStandbyAppLag)
+ mockExpect(t, m, queryReplicationSlotFiles(140004), dataVer140004ReplSlotFiles)
+
+ mockExpect(t, m, queryDatabaseStats(), dataVer140004DatabaseStats)
+ mockExpect(t, m, queryDatabaseSize(140004), dataVer140004DatabaseSize)
+ mockExpect(t, m, queryDatabaseConflicts(), dataVer140004DatabaseConflicts)
+ mockExpect(t, m, queryDatabaseLocks(), dataVer140004DatabaseLocks)
+
+ mockExpect(t, m, queryQueryableDatabaseList(), dataVer140004QueryableDatabaseList)
+ mockExpect(t, m, queryStatUserTables(), dataVer140004StatUserTablesDBPostgres)
+ mockExpect(t, m, queryStatIOUserTables(), dataVer140004StatIOUserTablesDBPostgres)
+ mockExpect(t, m, queryStatUserIndexes(), dataVer140004StatUserIndexesDBPostgres)
+ mockExpect(t, m, queryBloat(), dataVer140004Bloat)
+ mockExpect(t, m, queryColumnsStats(), dataVer140004ColumnsStats)
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+
+ expected := map[string]int64{
+ "autovacuum_analyze": 0,
+ "autovacuum_brin_summarize": 0,
+ "autovacuum_vacuum": 0,
+ "autovacuum_vacuum_analyze": 0,
+ "autovacuum_vacuum_freeze": 0,
+ "buffers_alloc": 27295744,
+ "buffers_backend": 0,
+ "buffers_backend_fsync": 0,
+ "buffers_checkpoint": 32768,
+ "buffers_clean": 0,
+ "catalog_relkind_I_count": 0,
+ "catalog_relkind_I_size": 0,
+ "catalog_relkind_S_count": 0,
+ "catalog_relkind_S_size": 0,
+ "catalog_relkind_c_count": 0,
+ "catalog_relkind_c_size": 0,
+ "catalog_relkind_f_count": 0,
+ "catalog_relkind_f_size": 0,
+ "catalog_relkind_i_count": 155,
+ "catalog_relkind_i_size": 3678208,
+ "catalog_relkind_m_count": 0,
+ "catalog_relkind_m_size": 0,
+ "catalog_relkind_p_count": 0,
+ "catalog_relkind_p_size": 0,
+ "catalog_relkind_r_count": 66,
+ "catalog_relkind_r_size": 3424256,
+ "catalog_relkind_t_count": 38,
+ "catalog_relkind_t_size": 548864,
+ "catalog_relkind_v_count": 137,
+ "catalog_relkind_v_size": 0,
+ "checkpoint_sync_time": 47,
+ "checkpoint_write_time": 167,
+ "checkpoints_req": 16,
+ "checkpoints_timed": 1814,
+ "databases_count": 2,
+ "db_postgres_blks_hit": 1221125,
+ "db_postgres_blks_read": 3252,
+ "db_postgres_blks_read_perc": 0,
+ "db_postgres_confl_bufferpin": 0,
+ "db_postgres_confl_deadlock": 0,
+ "db_postgres_confl_lock": 0,
+ "db_postgres_confl_snapshot": 0,
+ "db_postgres_confl_tablespace": 0,
+ "db_postgres_conflicts": 0,
+ "db_postgres_deadlocks": 0,
+ "db_postgres_lock_mode_AccessExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_AccessExclusiveLock_held": 0,
+ "db_postgres_lock_mode_AccessShareLock_awaited": 0,
+ "db_postgres_lock_mode_AccessShareLock_held": 99,
+ "db_postgres_lock_mode_ExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_ExclusiveLock_held": 0,
+ "db_postgres_lock_mode_RowExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_RowExclusiveLock_held": 99,
+ "db_postgres_lock_mode_RowShareLock_awaited": 0,
+ "db_postgres_lock_mode_RowShareLock_held": 99,
+ "db_postgres_lock_mode_ShareLock_awaited": 0,
+ "db_postgres_lock_mode_ShareLock_held": 0,
+ "db_postgres_lock_mode_ShareRowExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_ShareRowExclusiveLock_held": 0,
+ "db_postgres_lock_mode_ShareUpdateExclusiveLock_awaited": 0,
+ "db_postgres_lock_mode_ShareUpdateExclusiveLock_held": 0,
+ "db_postgres_numbackends": 3,
+ "db_postgres_numbackends_utilization": 10,
+ "db_postgres_size": 8758051,
+ "db_postgres_temp_bytes": 0,
+ "db_postgres_temp_files": 0,
+ "db_postgres_tup_deleted": 0,
+ "db_postgres_tup_fetched": 359833,
+ "db_postgres_tup_fetched_perc": 2,
+ "db_postgres_tup_inserted": 0,
+ "db_postgres_tup_returned": 13207245,
+ "db_postgres_tup_updated": 0,
+ "db_postgres_xact_commit": 1438660,
+ "db_postgres_xact_rollback": 70,
+ "db_production_blks_hit": 0,
+ "db_production_blks_read": 0,
+ "db_production_blks_read_perc": 0,
+ "db_production_confl_bufferpin": 0,
+ "db_production_confl_deadlock": 0,
+ "db_production_confl_lock": 0,
+ "db_production_confl_snapshot": 0,
+ "db_production_confl_tablespace": 0,
+ "db_production_conflicts": 0,
+ "db_production_deadlocks": 0,
+ "db_production_lock_mode_AccessExclusiveLock_awaited": 0,
+ "db_production_lock_mode_AccessExclusiveLock_held": 0,
+ "db_production_lock_mode_AccessShareLock_awaited": 0,
+ "db_production_lock_mode_AccessShareLock_held": 0,
+ "db_production_lock_mode_ExclusiveLock_awaited": 0,
+ "db_production_lock_mode_ExclusiveLock_held": 0,
+ "db_production_lock_mode_RowExclusiveLock_awaited": 0,
+ "db_production_lock_mode_RowExclusiveLock_held": 0,
+ "db_production_lock_mode_RowShareLock_awaited": 0,
+ "db_production_lock_mode_RowShareLock_held": 0,
+ "db_production_lock_mode_ShareLock_awaited": 99,
+ "db_production_lock_mode_ShareLock_held": 0,
+ "db_production_lock_mode_ShareRowExclusiveLock_awaited": 0,
+ "db_production_lock_mode_ShareRowExclusiveLock_held": 0,
+ "db_production_lock_mode_ShareUpdateExclusiveLock_awaited": 0,
+ "db_production_lock_mode_ShareUpdateExclusiveLock_held": 99,
+ "db_production_numbackends": 1,
+ "db_production_numbackends_utilization": 1,
+ "db_production_size": 8602115,
+ "db_production_temp_bytes": 0,
+ "db_production_temp_files": 0,
+ "db_production_tup_deleted": 0,
+ "db_production_tup_fetched": 0,
+ "db_production_tup_fetched_perc": 0,
+ "db_production_tup_inserted": 0,
+ "db_production_tup_returned": 0,
+ "db_production_tup_updated": 0,
+ "db_production_xact_commit": 0,
+ "db_production_xact_rollback": 0,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_size": 8192,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_size": 8192,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1,
+ "index_myaccounts_email_key_table_myaccounts_db_postgres_schema_public_usage_status_used": 0,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_size": 8192,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_size": 8192,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1,
+ "index_myaccounts_pkey_table_myaccounts_db_postgres_schema_public_usage_status_used": 0,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_size": 8192,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_usage_status_unused": 1,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_myschema_usage_status_used": 0,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_size": 8192,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_usage_status_unused": 1,
+ "index_myaccounts_username_key_table_myaccounts_db_postgres_schema_public_usage_status_used": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_bloat_size": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_bloat_size_perc": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_size": 112336896,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_usage_status_unused": 0,
+ "index_pgbench_accounts_pkey_table_pgbench_accounts_db_postgres_schema_public_usage_status_used": 1,
+ "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_size": 16384,
+ "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_usage_status_unused": 1,
+ "index_pgbench_branches_pkey_table_pgbench_branches_db_postgres_schema_public_usage_status_used": 0,
+ "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_size": 32768,
+ "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_usage_status_unused": 1,
+ "index_pgbench_tellers_pkey_table_pgbench_tellers_db_postgres_schema_public_usage_status_used": 0,
+ "locks_utilization": 6,
+ "maxwritten_clean": 0,
+ "oldest_current_xid": 9,
+ "percent_towards_emergency_autovacuum": 0,
+ "percent_towards_wraparound": 0,
+ "query_running_time_hist_bucket_1": 1,
+ "query_running_time_hist_bucket_2": 0,
+ "query_running_time_hist_bucket_3": 0,
+ "query_running_time_hist_bucket_4": 0,
+ "query_running_time_hist_bucket_5": 0,
+ "query_running_time_hist_bucket_6": 0,
+ "query_running_time_hist_bucket_inf": 0,
+ "query_running_time_hist_count": 1,
+ "query_running_time_hist_sum": 0,
+ "repl_slot_ocean_replslot_files": 0,
+ "repl_slot_ocean_replslot_wal_keep": 0,
+ "repl_standby_app_phys-standby2_wal_flush_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_flush_lag_time": 0,
+ "repl_standby_app_phys-standby2_wal_replay_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_replay_lag_time": 0,
+ "repl_standby_app_phys-standby2_wal_sent_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_write_lag_size": 0,
+ "repl_standby_app_phys-standby2_wal_write_time": 0,
+ "repl_standby_app_walreceiver_wal_flush_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_flush_lag_time": 2,
+ "repl_standby_app_walreceiver_wal_replay_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_replay_lag_time": 2,
+ "repl_standby_app_walreceiver_wal_sent_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_write_lag_size": 2,
+ "repl_standby_app_walreceiver_wal_write_time": 2,
+ "server_connections_available": 97,
+ "server_connections_state_active": 1,
+ "server_connections_state_disabled": 1,
+ "server_connections_state_fastpath_function_call": 1,
+ "server_connections_state_idle": 14,
+ "server_connections_state_idle_in_transaction": 7,
+ "server_connections_state_idle_in_transaction_aborted": 1,
+ "server_connections_used": 3,
+ "server_connections_utilization": 3,
+ "server_uptime": 499906,
+ "table_pgbench_accounts_db_postgres_schema_public_bloat_size": 9863168,
+ "table_pgbench_accounts_db_postgres_schema_public_bloat_size_perc": 1,
+ "table_pgbench_accounts_db_postgres_schema_public_heap_blks_hit": 224484753408,
+ "table_pgbench_accounts_db_postgres_schema_public_heap_blks_read": 1803882668032,
+ "table_pgbench_accounts_db_postgres_schema_public_heap_blks_read_perc": 88,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_blks_hit": 7138635948032,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_blks_read": 973310976000,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_blks_read_perc": 11,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_scan": 99955,
+ "table_pgbench_accounts_db_postgres_schema_public_idx_tup_fetch": 99955,
+ "table_pgbench_accounts_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_accounts_db_postgres_schema_public_last_vacuum_ago": 377149,
+ "table_pgbench_accounts_db_postgres_schema_public_n_dead_tup": 1000048,
+ "table_pgbench_accounts_db_postgres_schema_public_n_dead_tup_perc": 16,
+ "table_pgbench_accounts_db_postgres_schema_public_n_live_tup": 5000048,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_ins": 5000000,
+ "table_pgbench_accounts_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_accounts_db_postgres_schema_public_seq_scan": 2,
+ "table_pgbench_accounts_db_postgres_schema_public_seq_tup_read": 5000000,
+ "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_accounts_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_accounts_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_accounts_db_postgres_schema_public_total_size": 784031744,
+ "table_pgbench_branches_db_postgres_schema_public_heap_blks_hit": 304316416,
+ "table_pgbench_branches_db_postgres_schema_public_heap_blks_read": 507150336,
+ "table_pgbench_branches_db_postgres_schema_public_heap_blks_read_perc": 62,
+ "table_pgbench_branches_db_postgres_schema_public_idx_blks_hit": 101441536,
+ "table_pgbench_branches_db_postgres_schema_public_idx_blks_read": 101425152,
+ "table_pgbench_branches_db_postgres_schema_public_idx_blks_read_perc": 49,
+ "table_pgbench_branches_db_postgres_schema_public_idx_scan": 0,
+ "table_pgbench_branches_db_postgres_schema_public_idx_tup_fetch": 0,
+ "table_pgbench_branches_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_branches_db_postgres_schema_public_last_vacuum_ago": 371719,
+ "table_pgbench_branches_db_postgres_schema_public_n_dead_tup": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_dead_tup_perc": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_live_tup": 50,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_ins": 50,
+ "table_pgbench_branches_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_branches_db_postgres_schema_public_seq_scan": 6,
+ "table_pgbench_branches_db_postgres_schema_public_seq_tup_read": 300,
+ "table_pgbench_branches_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_branches_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_branches_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_branches_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_branches_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_branches_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_branches_db_postgres_schema_public_total_size": 57344,
+ "table_pgbench_history_db_postgres_schema_public_heap_blks_hit": 0,
+ "table_pgbench_history_db_postgres_schema_public_heap_blks_read": 0,
+ "table_pgbench_history_db_postgres_schema_public_heap_blks_read_perc": 0,
+ "table_pgbench_history_db_postgres_schema_public_idx_blks_hit": -1,
+ "table_pgbench_history_db_postgres_schema_public_idx_blks_read": -1,
+ "table_pgbench_history_db_postgres_schema_public_idx_blks_read_perc": 50,
+ "table_pgbench_history_db_postgres_schema_public_idx_scan": 0,
+ "table_pgbench_history_db_postgres_schema_public_idx_tup_fetch": 0,
+ "table_pgbench_history_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_history_db_postgres_schema_public_last_vacuum_ago": 377149,
+ "table_pgbench_history_db_postgres_schema_public_n_dead_tup": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_dead_tup_perc": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_live_tup": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_ins": 0,
+ "table_pgbench_history_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_history_db_postgres_schema_public_seq_scan": 0,
+ "table_pgbench_history_db_postgres_schema_public_seq_tup_read": 0,
+ "table_pgbench_history_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_history_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_history_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_history_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_history_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_history_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_history_db_postgres_schema_public_total_size": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_heap_blks_hit": 491937792,
+ "table_pgbench_tellers_db_postgres_schema_public_heap_blks_read": 623828992,
+ "table_pgbench_tellers_db_postgres_schema_public_heap_blks_read_perc": 55,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_blks_hit": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_blks_read": 101433344,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_blks_read_perc": 100,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_scan": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_idx_tup_fetch": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_last_analyze_ago": 377149,
+ "table_pgbench_tellers_db_postgres_schema_public_last_vacuum_ago": 371719,
+ "table_pgbench_tellers_db_postgres_schema_public_n_dead_tup": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_dead_tup_perc": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_live_tup": 500,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_del": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_hot_upd": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_hot_upd_perc": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_ins": 500,
+ "table_pgbench_tellers_db_postgres_schema_public_n_tup_upd": 0,
+ "table_pgbench_tellers_db_postgres_schema_public_null_columns": 1,
+ "table_pgbench_tellers_db_postgres_schema_public_seq_scan": 1,
+ "table_pgbench_tellers_db_postgres_schema_public_seq_tup_read": 500,
+ "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_hit": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_read": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_tidx_blks_read_perc": 50,
+ "table_pgbench_tellers_db_postgres_schema_public_toast_blks_hit": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_toast_blks_read": -1,
+ "table_pgbench_tellers_db_postgres_schema_public_toast_blks_read_perc": 50,
+ "table_pgbench_tellers_db_postgres_schema_public_total_size": 90112,
+ "transaction_running_time_hist_bucket_1": 1,
+ "transaction_running_time_hist_bucket_2": 0,
+ "transaction_running_time_hist_bucket_3": 0,
+ "transaction_running_time_hist_bucket_4": 0,
+ "transaction_running_time_hist_bucket_5": 0,
+ "transaction_running_time_hist_bucket_6": 0,
+ "transaction_running_time_hist_bucket_inf": 7,
+ "transaction_running_time_hist_count": 8,
+ "transaction_running_time_hist_sum": 4022,
+ "wal_archive_files_done_count": 1,
+ "wal_archive_files_ready_count": 1,
+ "wal_recycled_files": 0,
+ "wal_writes": 24103144,
+ "wal_written_files": 1,
+ }
+
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying the database version returns an error": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpectErr(m, queryServerVersion())
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying settings max connections returns an error": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpectErr(m, querySettingsMaxConnections())
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ "Fail when querying the server connections returns an error": {
+ {
+ prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpectErr(m, queryServerCurrentConnectionsUsed())
+ },
+ check: func(t *testing.T, pg *Postgres) {
+ mx := pg.Collect()
+ var expected map[string]int64
+ assert.Equal(t, expected, mx)
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ db, mock, err := sqlmock.New(
+ sqlmock.QueryMatcherOption(sqlmock.QueryMatcherEqual),
+ )
+ require.NoError(t, err)
+ pg := New()
+ pg.db = db
+ defer func() { _ = db.Close() }()
+
+ require.NoError(t, pg.Init())
+
+ for i, step := range test {
+ t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
+ step.prepareMock(t, pg, mock)
+ step.check(t, pg)
+ })
+ }
+ assert.NoError(t, mock.ExpectationsWereMet())
+ })
+ }
+}
+
+func mockExpect(t *testing.T, mock sqlmock.Sqlmock, query string, rows []byte) {
+ mock.ExpectQuery(query).WillReturnRows(mustMockRows(t, rows)).RowsWillBeClosed()
+}
+
+func mockExpectErr(mock sqlmock.Sqlmock, query string) {
+ mock.ExpectQuery(query).WillReturnError(errors.New("mock error"))
+}
+
+func mustMockRows(t *testing.T, data []byte) *sqlmock.Rows {
+ rows, err := prepareMockRows(data)
+ require.NoError(t, err)
+ return rows
+}
+
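+// prepareMockRows converts psql-style aligned output, as stored in the
+// testdata .txt files, into *sqlmock.Rows: the first non-empty line supplies
+// the column names, separator lines starting with "---" are skipped, and the
+// remaining lines are split on '|' with each cell trimmed. For example:
+//
+//	 datname  | size
+//	----------+---------
+//	 postgres | 8758051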
+func prepareMockRows(data []byte) (*sqlmock.Rows, error) {
+ r := bytes.NewReader(data)
+ sc := bufio.NewScanner(r)
+
+ var numColumns int
+ var rows *sqlmock.Rows
+
+ for sc.Scan() {
+ s := strings.TrimSpace(sc.Text())
+ if s == "" || strings.HasPrefix(s, "---") {
+ continue
+ }
+
+ parts := strings.Split(s, "|")
+ for i, v := range parts {
+ parts[i] = strings.TrimSpace(v)
+ }
+
+ if rows == nil {
+ numColumns = len(parts)
+ rows = sqlmock.NewRows(parts)
+ continue
+ }
+
+ if len(parts) != numColumns {
+ return nil, fmt.Errorf("prepareMockRows(): columns != values (%d/%d)", numColumns, len(parts))
+ }
+
+ values := make([]driver.Value, len(parts))
+ for i, v := range parts {
+ values[i] = v
+ }
+ rows.AddRow(values...)
+ }
+
+ if rows == nil {
+ return nil, errors.New("prepareMockRows(): nil rows result")
+ }
+
+ return rows, nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/queries.go b/src/go/collectors/go.d.plugin/modules/postgres/queries.go
new file mode 100644
index 000000000..f6afc9342
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/queries.go
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package postgres
+
+func queryServerVersion() string {
+ return "SHOW server_version_num;"
+}
+
+func queryIsSuperUser() string {
+ return "SELECT current_setting('is_superuser') = 'on' AS is_superuser;"
+}
+
+func queryPGIsInRecovery() string {
+ return "SELECT pg_is_in_recovery();"
+}
+
+func querySettingsMaxConnections() string {
+ return "SELECT current_setting('max_connections')::INT - current_setting('superuser_reserved_connections')::INT;"
+}
+
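+// For example, with the default max_locks_per_transaction = 64,
+// max_connections = 100 and max_prepared_transactions = 0, the server can
+// hold at most 64 * (100 + 0) = 6400 locks (the value in the test fixtures).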
+func querySettingsMaxLocksHeld() string {
+ return `
+SELECT current_setting('max_locks_per_transaction')::INT *
+ (current_setting('max_connections')::INT + current_setting('max_prepared_transactions')::INT);
+`
+}
+
+// TODO: this is not correct; we should use pg_stat_activity instead.
+// But first we need to check which connections (backend_type) count towards 'max_connections'.
+// I think the Python version's query doesn't count them correctly:
+// https://github.com/netdata/netdata/blob/1782e2d002bc5203128e5a5d2b801010e2822d2d/collectors/python.d.plugin/postgres/postgres.chart.py#L266
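+//
+// A possible replacement (a sketch only; which backend types actually count
+// towards 'max_connections' still needs checking):
+//
+//	SELECT count(*) FROM pg_stat_activity WHERE backend_type = 'client backend';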
+func queryServerCurrentConnectionsUsed() string {
+ return "SELECT sum(numbackends) FROM pg_stat_database;"
+}
+
+func queryServerConnectionsState() string {
+ return `
+SELECT state,
+ COUNT(*)
+FROM pg_stat_activity
+WHERE state IN
+ (
+ 'active',
+ 'idle',
+ 'idle in transaction',
+ 'idle in transaction (aborted)',
+ 'fastpath function call',
+ 'disabled'
+ )
+GROUP BY state;
+`
+}
+
+func queryCheckpoints() string {
+ // definition by version: https://pgpedia.info/p/pg_stat_bgwriter.html
+ // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-BGWRITER-VIEW
+ // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1104
+
+ return `
+SELECT checkpoints_timed,
+ checkpoints_req,
+ checkpoint_write_time,
+ checkpoint_sync_time,
+ buffers_checkpoint * current_setting('block_size')::numeric AS buffers_checkpoint_bytes,
+ buffers_clean * current_setting('block_size')::numeric AS buffers_clean_bytes,
+ maxwritten_clean,
+ buffers_backend * current_setting('block_size')::numeric AS buffers_backend_bytes,
+ buffers_backend_fsync,
+ buffers_alloc * current_setting('block_size')::numeric AS buffers_alloc_bytes
+FROM pg_stat_bgwriter;
+`
+}
+
+func queryServerUptime() string {
+ return `SELECT EXTRACT(epoch FROM CURRENT_TIMESTAMP - pg_postmaster_start_time());`
+}
+
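+// For example, with max(age(datfrozenxid)) = 200,000,000 and the default
+// autovacuum_freeze_max_age of 200,000,000, this query reports
+// percent_towards_wraparound = round(100 * 2e8 / 2e9) = 10 and
+// percent_towards_emergency_autovacuum = round(100 * 2e8 / 2e8) = 100.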
+func queryTXIDWraparound() string {
+ // https://www.crunchydata.com/blog/managing-transaction-id-wraparound-in-postgresql
+ return `
+WITH max_age AS (
+    SELECT 2000000000 AS max_old_xid,
+           setting AS autovacuum_freeze_max_age
+    FROM pg_catalog.pg_settings
+    WHERE name = 'autovacuum_freeze_max_age'
+),
+per_database_stats AS (
+    SELECT datname,
+           m.max_old_xid::int,
+           m.autovacuum_freeze_max_age::int,
+           age(d.datfrozenxid) AS oldest_current_xid
+    FROM pg_catalog.pg_database d
+    JOIN max_age m ON (true)
+    WHERE d.datallowconn
+)
+SELECT max(oldest_current_xid) AS oldest_current_xid,
+       max(ROUND(100 * (oldest_current_xid / max_old_xid::float))) AS percent_towards_wraparound,
+       max(ROUND(100 * (oldest_current_xid / autovacuum_freeze_max_age::float))) AS percent_towards_emergency_autovacuum
+FROM per_database_stats;
+`
+}
+
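+// PostgreSQL 10 renamed the transaction log functions: pg_current_xlog_location,
+// pg_last_xlog_receive_location and pg_xlog_location_diff became
+// pg_current_wal_lsn, pg_last_wal_receive_lsn and pg_wal_lsn_diff, and the
+// pg_xlog directory became pg_wal. Hence the version branches here and in the
+// WAL file queries below.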
+func queryWALWrites(version int) string {
+ if version < pgVersion10 {
+ return `
+SELECT
+ pg_xlog_location_diff(
+ CASE
+ pg_is_in_recovery()
+ WHEN
+ TRUE
+ THEN
+ pg_last_xlog_receive_location()
+ ELSE
+ pg_current_xlog_location()
+ END
+, '0/0') AS wal_writes;
+`
+ }
+ return `
+SELECT
+ pg_wal_lsn_diff(
+ CASE
+ pg_is_in_recovery()
+ WHEN
+ TRUE
+ THEN
+ pg_last_wal_receive_lsn()
+ ELSE
+ pg_current_wal_lsn()
+ END
+, '0/0') AS wal_writes;
+`
+}
+
+func queryWALFiles(version int) string {
+ if version < pgVersion10 {
+ return `
+SELECT count(*) FILTER (WHERE type = 'recycled') AS wal_recycled_files,
+ count(*) FILTER (WHERE type = 'written') AS wal_written_files
+FROM (SELECT wal.name,
+ pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END),
+ CASE
+ WHEN wal.name > pg_xlogfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_xlog_location()
+ END) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_xlog') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY (pg_stat_file('pg_xlog/' || name, true)).modification,
+ wal.name DESC) sub;
+`
+ }
+ return `
+SELECT count(*) FILTER (WHERE type = 'recycled') AS wal_recycled_files,
+ count(*) FILTER (WHERE type = 'written') AS wal_written_files
+FROM (SELECT wal.name,
+ pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END),
+ CASE
+ WHEN wal.name > pg_walfile_name(
+ CASE pg_is_in_recovery()
+ WHEN true THEN NULL
+ ELSE pg_current_wal_lsn()
+ END) THEN 'recycled'
+ ELSE 'written'
+ END AS type
+ FROM pg_catalog.pg_ls_dir('pg_wal') AS wal(name)
+ WHERE name ~ '^[0-9A-F]{24}$'
+ ORDER BY (pg_stat_file('pg_wal/' || name, true)).modification,
+ wal.name DESC) sub;
+`
+}
+
+func queryWALArchiveFiles(version int) string {
+ if version < pgVersion10 {
+ return `
+ SELECT
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),
+ 0) AS INT) AS wal_archive_files_ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),
+ 0) AS INT) AS wal_archive_files_done_count
+ FROM
+ pg_catalog.pg_ls_dir('pg_xlog/archive_status') AS archive_files (archive_file);
+`
+ }
+ return `
+ SELECT
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.ready$$r$ as INT)),
+ 0) AS INT) AS wal_archive_files_ready_count,
+ CAST(COALESCE(SUM(CAST(archive_file ~ $r$\.done$$r$ AS INT)),
+ 0) AS INT) AS wal_archive_files_done_count
+ FROM
+ pg_catalog.pg_ls_dir('pg_wal/archive_status') AS archive_files (archive_file);
+`
+}
+
+func queryCatalogRelations() string {
+	// Roughly the same as
+	// https://github.com/netdata/netdata/blob/750810e1798e09cc6210e83594eb9ed4905f8f12/collectors/python.d.plugin/postgres/postgres.chart.py#L336-L354
+	// TODO: do we need this? It is optional and disabled by default in the Python version.
+ return `
+SELECT relkind,
+ COUNT(1),
+ SUM(relpages) * current_setting('block_size')::NUMERIC AS size
+FROM pg_class
+GROUP BY relkind;
+`
+}
+
+func queryAutovacuumWorkers() string {
+ // https://github.com/postgres/postgres/blob/9e4f914b5eba3f49ab99bdecdc4f96fac099571f/src/backend/postmaster/autovacuum.c#L3168-L3183
+ return `
+SELECT count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_analyze,
+ count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: VACUUM ANALYZE%%'
+ AND query NOT LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_vacuum_analyze,
+ count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: VACUUM %.%%'
+ AND query NOT LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_vacuum,
+ count(*) FILTER (
+ WHERE
+ query LIKE '%%to prevent wraparound%%'
+ ) AS autovacuum_vacuum_freeze,
+ count(*) FILTER (
+ WHERE
+ query LIKE 'autovacuum: BRIN summarize%%'
+ ) AS autovacuum_brin_summarize
+FROM pg_stat_activity
+WHERE query NOT LIKE '%%pg_stat_activity%%';
+`
+}
+
+func queryXactQueryRunningTime() string {
+ return `
+SELECT datname,
+ state,
+ EXTRACT(epoch from now() - xact_start) as xact_running_time,
+ EXTRACT(epoch from now() - query_start) as query_running_time
+FROM pg_stat_activity
+WHERE datname IS NOT NULL
+ AND state IN
+ (
+ 'active',
+ 'idle in transaction',
+ 'idle in transaction (aborted)'
+ )
+ AND backend_type = 'client backend';
+`
+}
+
+func queryReplicationStandbyAppDelta(version int) string {
+ if version < pgVersion10 {
+ return `
+SELECT application_name,
+ pg_xlog_location_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_xlog_receive_location()
+ ELSE pg_current_xlog_location()
+ END,
+ sent_location) AS sent_delta,
+ pg_xlog_location_diff(
+ sent_location, write_location) AS write_delta,
+ pg_xlog_location_diff(
+ write_location, flush_location) AS flush_delta,
+ pg_xlog_location_diff(
+ flush_location, replay_location) AS replay_delta
+FROM pg_stat_replication psr
+WHERE application_name IS NOT NULL;
+`
+ }
+ return `
+SELECT application_name,
+ pg_wal_lsn_diff(
+ CASE pg_is_in_recovery()
+ WHEN true THEN pg_last_wal_receive_lsn()
+ ELSE pg_current_wal_lsn()
+ END,
+ sent_lsn) AS sent_delta,
+ pg_wal_lsn_diff(
+ sent_lsn, write_lsn) AS write_delta,
+ pg_wal_lsn_diff(
+ write_lsn, flush_lsn) AS flush_delta,
+ pg_wal_lsn_diff(
+ flush_lsn, replay_lsn) AS replay_delta
+FROM pg_stat_replication
+WHERE application_name IS NOT NULL;
+`
+}
+
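+// Note: write_lag, flush_lag and replay_lag were added to pg_stat_replication
+// in PostgreSQL 10, so unlike the delta query above this one has no pre-v10
+// variant; presumably the caller only issues it on v10+.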
+func queryReplicationStandbyAppLag() string {
+ return `
+SELECT application_name,
+ COALESCE(EXTRACT(EPOCH FROM write_lag)::bigint, 0) AS write_lag,
+ COALESCE(EXTRACT(EPOCH FROM flush_lag)::bigint, 0) AS flush_lag,
+ COALESCE(EXTRACT(EPOCH FROM replay_lag)::bigint, 0) AS replay_lag
+FROM pg_stat_replication psr
+WHERE application_name IS NOT NULL;
+`
+}
+
+func queryReplicationSlotFiles(version int) string {
+ if version < pgVersion11 {
+ return `
+WITH wal_size AS (
+ SELECT
+ current_setting('wal_block_size')::INT * setting::INT AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ CASE WHEN pg_is_in_recovery()
+ THEN (
+ pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_wal_lsn_diff(restart_lsn, '0/0') % s.val)
+ ) / s.val
+ ELSE (
+ pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_walfile_name_offset(restart_lsn)).file_offset
+ ) / s.val
+ END
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+`
+ }
+
+ return `
+WITH wal_size AS (
+ SELECT
+ setting::int AS val
+ FROM pg_settings
+ WHERE name = 'wal_segment_size'
+ )
+SELECT
+ slot_name,
+ slot_type,
+ replslot_wal_keep,
+ count(slot_file) AS replslot_files
+FROM
+ (SELECT
+ slot.slot_name,
+ CASE
+ WHEN slot_file <> 'state' THEN 1
+ END AS slot_file ,
+ slot_type,
+ COALESCE (
+ floor(
+ CASE WHEN pg_is_in_recovery()
+ THEN (
+ pg_wal_lsn_diff(pg_last_wal_receive_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_wal_lsn_diff(restart_lsn, '0/0') % s.val)
+ ) / s.val
+ ELSE (
+ pg_wal_lsn_diff(pg_current_wal_lsn(), slot.restart_lsn)
+ -- this is needed to account for whole WAL retention and
+ -- not only size retention
+ + (pg_walfile_name_offset(restart_lsn)).file_offset
+ ) / s.val
+ END
+ ),0) AS replslot_wal_keep
+ FROM pg_replication_slots slot
+ LEFT JOIN (
+ SELECT
+ slot2.slot_name,
+ pg_ls_dir('pg_replslot/' || slot2.slot_name) AS slot_file
+ FROM pg_replication_slots slot2
+ ) files (slot_name, slot_file)
+ ON slot.slot_name = files.slot_name
+ CROSS JOIN wal_size s
+ ) AS d
+GROUP BY
+ slot_name,
+ slot_type,
+ replslot_wal_keep;
+`
+}
+
+func queryQueryableDatabaseList() string {
+ return `
+SELECT datname
+FROM pg_database
+WHERE datallowconn = true
+ AND datistemplate = false
+ AND datname != current_database()
+ AND has_database_privilege((SELECT CURRENT_USER), datname, 'connect');
+`
+}
+
+func queryDatabaseStats() string {
+ // definition by version: https://pgpedia.info/p/pg_stat_database.html
+ // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-DATABASE-VIEW
+ // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1018
+
+ return `
+SELECT stat.datname,
+ numbackends,
+ pg_database.datconnlimit,
+ xact_commit,
+ xact_rollback,
+ blks_read * current_setting('block_size')::numeric AS blks_read_bytes,
+ blks_hit * current_setting('block_size')::numeric AS blks_hit_bytes,
+ tup_returned,
+ tup_fetched,
+ tup_inserted,
+ tup_updated,
+ tup_deleted,
+ conflicts,
+ temp_files,
+ temp_bytes,
+ deadlocks
+FROM pg_stat_database stat
+ INNER JOIN
+ pg_database
+ ON pg_database.datname = stat.datname
+WHERE pg_database.datistemplate = false;
+`
+}
+
+func queryDatabaseSize(version int) string {
+ if version < pgVersion10 {
+ return `
+SELECT datname,
+ pg_database_size(datname) AS size
+FROM pg_database
+WHERE pg_database.datistemplate = false
+ AND has_database_privilege((SELECT CURRENT_USER), pg_database.datname, 'connect');
+`
+ }
+ return `
+SELECT datname,
+ pg_database_size(datname) AS size
+FROM pg_database
+WHERE pg_database.datistemplate = false
+ AND (has_database_privilege((SELECT CURRENT_USER), datname, 'connect')
+ OR pg_has_role((SELECT CURRENT_USER), 'pg_read_all_stats', 'MEMBER'));
+`
+}
+
+func queryDatabaseConflicts() string {
+ // definition by version: https://pgpedia.info/p/pg_stat_database_conflicts.html
+ // docs: https://www.postgresql.org/docs/current/monitoring-stats.html#MONITORING-PG-STAT-DATABASE-CONFLICTS-VIEW
+ // code: https://github.com/postgres/postgres/blob/366283961ac0ed6d89014444c6090f3fd02fce0a/src/backend/catalog/system_views.sql#L1058
+
+ return `
+SELECT stat.datname,
+ confl_tablespace,
+ confl_lock,
+ confl_snapshot,
+ confl_bufferpin,
+ confl_deadlock
+FROM pg_stat_database_conflicts stat
+ INNER JOIN
+ pg_database
+ ON pg_database.datname = stat.datname
+WHERE pg_database.datistemplate = false;
+`
+}
+
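+// Judging by the test fixtures, granted = true rows from this query are
+// reported as "db_<db>_lock_mode_<mode>_held" and granted = false rows as
+// "db_<db>_lock_mode_<mode>_awaited".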
+func queryDatabaseLocks() string {
+ // definition by version: https://pgpedia.info/p/pg_locks.html
+ // docs: https://www.postgresql.org/docs/current/view-pg-locks.html
+
+ return `
+SELECT pg_database.datname,
+ mode,
+ granted,
+ count(mode) AS locks_count
+FROM pg_locks
+ INNER JOIN
+ pg_database
+ ON pg_database.oid = pg_locks.database
+WHERE pg_database.datistemplate = false
+GROUP BY datname,
+ mode,
+ granted
+ORDER BY datname,
+ mode;
+`
+}
+
+func queryUserTablesCount() string {
+	return "SELECT count(*) FROM pg_stat_user_tables;"
+}
+
+func queryStatUserTables() string {
+ return `
+SELECT current_database() as datname,
+ schemaname,
+ relname,
+ inh.parent_relname,
+ seq_scan,
+ seq_tup_read,
+ idx_scan,
+ idx_tup_fetch,
+ n_tup_ins,
+ n_tup_upd,
+ n_tup_del,
+ n_tup_hot_upd,
+ n_live_tup,
+ n_dead_tup,
+ EXTRACT(epoch from now() - last_vacuum) as last_vacuum,
+ EXTRACT(epoch from now() - last_autovacuum) as last_autovacuum,
+ EXTRACT(epoch from now() - last_analyze) as last_analyze,
+ EXTRACT(epoch from now() - last_autoanalyze) as last_autoanalyze,
+ vacuum_count,
+ autovacuum_count,
+ analyze_count,
+ autoanalyze_count,
+ pg_total_relation_size(quote_ident(schemaname) || '.' || quote_ident(relname)) as total_relation_size
+FROM pg_stat_user_tables
+LEFT JOIN(
+ SELECT
+ c.oid AS child_oid,
+ p.relname AS parent_relname
+ FROM
+ pg_inherits
+ JOIN pg_class AS c ON (inhrelid = c.oid)
+ JOIN pg_class AS p ON (inhparent = p.oid)
+ ) AS inh ON inh.child_oid = relid
+WHERE has_schema_privilege(schemaname, 'USAGE');
+`
+}
+
+func queryStatIOUserTables() string {
+ return `
+SELECT current_database() AS datname,
+ schemaname,
+ relname,
+ inh.parent_relname,
+ heap_blks_read * current_setting('block_size')::numeric AS heap_blks_read_bytes,
+ heap_blks_hit * current_setting('block_size')::numeric AS heap_blks_hit_bytes,
+ idx_blks_read * current_setting('block_size')::numeric AS idx_blks_read_bytes,
+ idx_blks_hit * current_setting('block_size')::numeric AS idx_blks_hit_bytes,
+ toast_blks_read * current_setting('block_size')::numeric AS toast_blks_read_bytes,
+ toast_blks_hit * current_setting('block_size')::numeric AS toast_blks_hit_bytes,
+ tidx_blks_read * current_setting('block_size')::numeric AS tidx_blks_read_bytes,
+ tidx_blks_hit * current_setting('block_size')::numeric AS tidx_blks_hit_bytes
+FROM pg_statio_user_tables
+LEFT JOIN(
+ SELECT
+ c.oid AS child_oid,
+ p.relname AS parent_relname
+ FROM
+ pg_inherits
+ JOIN pg_class AS c ON (inhrelid = c.oid)
+ JOIN pg_class AS p ON (inhparent = p.oid)
+ ) AS inh ON inh.child_oid = relid
+WHERE has_schema_privilege(schemaname, 'USAGE');
+`
+}
+
+func queryUserIndexesCount() string {
+	return "SELECT count(*) FROM pg_stat_user_indexes;"
+}
+
+func queryStatUserIndexes() string {
+ return `
+SELECT current_database() as datname,
+ schemaname,
+ relname,
+ indexrelname,
+ inh.parent_relname,
+ idx_scan,
+ idx_tup_read,
+ idx_tup_fetch,
+ pg_relation_size(quote_ident(schemaname) || '.' || quote_ident(indexrelname)::text) as size
+FROM pg_stat_user_indexes
+LEFT JOIN(
+ SELECT
+ c.oid AS child_oid,
+ p.relname AS parent_relname
+ FROM
+ pg_inherits
+ JOIN pg_class AS c ON (inhrelid = c.oid)
+ JOIN pg_class AS p ON (inhparent = p.oid)
+ ) AS inh ON inh.child_oid = relid
+WHERE has_schema_privilege(schemaname, 'USAGE');
+`
+}
+
+// The following query for bloat was taken from the venerable check_postgres
+// script (https://bucardo.org/check_postgres/), which is:
+//
+// Copyright (c) 2007-2017 Greg Sabino Mullane
+//------------------------------------------------------------------------------
+
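+// Reading the query below: "otta" is check_postgres's estimate of the optimal
+// (minimal) page count for a table, and "iotta" the same for its index;
+// tbloat/ibloat are the ratios of actual pages to those estimates, and the
+// wasted* columns express the excess in pages and bytes.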
+func queryBloat() string {
+ return `
+SELECT
+ current_database() AS db, schemaname, tablename, reltuples::bigint AS tups, relpages::bigint AS pages, otta,
+ ROUND(CASE WHEN otta=0 OR sml.relpages=0 OR sml.relpages=otta THEN 0.0 ELSE sml.relpages/otta::numeric END,1) AS tbloat,
+ CASE WHEN relpages < otta THEN 0 ELSE relpages::bigint - otta END AS wastedpages,
+ CASE WHEN relpages < otta THEN 0 ELSE bs*(sml.relpages-otta)::bigint END AS wastedbytes,
+ CASE WHEN relpages < otta THEN '0 bytes'::text ELSE (bs*(relpages-otta))::bigint::text || ' bytes' END AS wastedsize,
+ iname, ituples::bigint AS itups, ipages::bigint AS ipages, iotta,
+ ROUND(CASE WHEN iotta=0 OR ipages=0 OR ipages=iotta THEN 0.0 ELSE ipages/iotta::numeric END,1) AS ibloat,
+ CASE WHEN ipages < iotta THEN 0 ELSE ipages::bigint - iotta END AS wastedipages,
+ CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta) END AS wastedibytes,
+ CASE WHEN ipages < iotta THEN '0 bytes' ELSE (bs*(ipages-iotta))::bigint::text || ' bytes' END AS wastedisize,
+ CASE WHEN relpages < otta THEN
+ CASE WHEN ipages < iotta THEN 0 ELSE bs*(ipages-iotta::bigint) END
+ ELSE CASE WHEN ipages < iotta THEN bs*(relpages-otta::bigint)
+ ELSE bs*(relpages-otta::bigint + ipages-iotta::bigint) END
+ END AS totalwastedbytes
+FROM (
+ SELECT
+ nn.nspname AS schemaname,
+ cc.relname AS tablename,
+ COALESCE(cc.reltuples,0) AS reltuples,
+ COALESCE(cc.relpages,0) AS relpages,
+ COALESCE(bs,0) AS bs,
+ COALESCE(CEIL((cc.reltuples*((datahdr+ma-
+ (CASE WHEN datahdr%ma=0 THEN ma ELSE datahdr%ma END))+nullhdr2+4))/(bs-20::float)),0) AS otta,
+ COALESCE(c2.relname,'?') AS iname, COALESCE(c2.reltuples,0) AS ituples, COALESCE(c2.relpages,0) AS ipages,
+ COALESCE(CEIL((c2.reltuples*(datahdr-12))/(bs-20::float)),0) AS iotta -- very rough approximation, assumes all cols
+ FROM
+ pg_class cc
+ JOIN pg_namespace nn ON cc.relnamespace = nn.oid AND nn.nspname <> 'information_schema'
+ LEFT JOIN
+ (
+ SELECT
+ ma,bs,foo.nspname,foo.relname,
+ (datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
+ (maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
+ FROM (
+ SELECT
+ ns.nspname, tbl.relname, hdr, ma, bs,
+ SUM((1-coalesce(null_frac,0))*coalesce(avg_width, 2048)) AS datawidth,
+ MAX(coalesce(null_frac,0)) AS maxfracsum,
+ hdr+(
+ SELECT 1+count(*)/8
+ FROM pg_stats s2
+ WHERE null_frac<>0 AND s2.schemaname = ns.nspname AND s2.tablename = tbl.relname
+ ) AS nullhdr
+ FROM pg_attribute att
+ JOIN pg_class tbl ON att.attrelid = tbl.oid
+ JOIN pg_namespace ns ON ns.oid = tbl.relnamespace
+ LEFT JOIN pg_stats s ON s.schemaname=ns.nspname
+ AND s.tablename = tbl.relname
+ AND s.inherited=false
+ AND s.attname=att.attname,
+ (
+ SELECT
+ (SELECT current_setting('block_size')::numeric) AS bs,
+ CASE WHEN SUBSTRING(SPLIT_PART(v, ' ', 2) FROM '#"[0-9]+.[0-9]+#"%' for '#')
+ IN ('8.0','8.1','8.2') THEN 27 ELSE 23 END AS hdr,
+ CASE WHEN v ~ 'mingw32' OR v ~ '64-bit' THEN 8 ELSE 4 END AS ma
+ FROM (SELECT version() AS v) AS foo
+ ) AS constants
+ WHERE att.attnum > 0 AND tbl.relkind='r'
+ GROUP BY 1,2,3,4,5
+ ) AS foo
+ ) AS rs
+ ON cc.relname = rs.relname AND nn.nspname = rs.nspname
+ LEFT JOIN pg_index i ON indrelid = cc.oid
+ LEFT JOIN pg_class c2 ON c2.oid = i.indexrelid
+) AS sml
+WHERE sml.relpages - otta > 10 OR ipages - iotta > 10;
+`
+}
+
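+// Judging by the test fixtures, the collector uses null_percent from this
+// query to derive the per-table "null_columns" metric (apparently counting
+// columns whose null_percent is 100).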
+func queryColumnsStats() string {
+ return `
+SELECT current_database() AS datname,
+ nspname AS schemaname,
+ relname,
+ st.attname,
+ typname,
+ (st.null_frac * 100)::int AS null_percent,
+ case
+ when st.n_distinct >= 0
+ then st.n_distinct
+ else
+ abs(st.n_distinct) * reltuples
+ end AS "distinct"
+FROM pg_class c
+ JOIN
+ pg_namespace ns
+ ON
+ (ns.oid = relnamespace)
+ JOIN
+ pg_attribute at
+ ON
+ (c.oid = attrelid)
+ JOIN
+ pg_type t
+ ON
+ (t.oid = atttypid)
+ JOIN
+ pg_stats st
+ ON
+ (st.tablename = relname AND st.attname = at.attname)
+WHERE relkind = 'r'
+ AND nspname NOT LIKE E'pg\\_%'
+ AND nspname != 'information_schema'
+ AND NOT attisdropped
+ AND attstattarget != 0
+ AND reltuples >= 100
+ORDER BY nspname,
+ relname,
+ st.attname;
+`
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/config.json b/src/go/collectors/go.d.plugin/modules/postgres/testdata/config.json
new file mode 100644
index 000000000..6b39278c5
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/config.json
@@ -0,0 +1,14 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "timeout": 123.123,
+ "collect_databases_matching": "ok",
+ "transaction_time_histogram": [
+ 123.123
+ ],
+ "query_time_histogram": [
+ 123.123
+ ],
+ "max_db_tables": 123,
+ "max_db_indexes": 123
+}
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/config.yaml b/src/go/collectors/go.d.plugin/modules/postgres/testdata/config.yaml
new file mode 100644
index 000000000..36ff5f0b1
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+dsn: "ok"
+timeout: 123.123
+collect_databases_matching: "ok"
+transaction_time_histogram:
+ - 123.123
+query_time_histogram:
+ - 123.123
+max_db_tables: 123
+max_db_indexes: 123
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/autovacuum_workers.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/autovacuum_workers.txt
new file mode 100644
index 000000000..7adc787bc
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/autovacuum_workers.txt
@@ -0,0 +1,3 @@
+ autovacuum_analyze | autovacuum_vacuum_analyze | autovacuum_vacuum | autovacuum_vacuum_freeze | autovacuum_brin_summarize
+--------------------+---------------------------+-------------------+--------------------------+---------------------------
+ 0 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/bloat_tables.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/bloat_tables.txt
new file mode 100644
index 000000000..307695363
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/bloat_tables.txt
@@ -0,0 +1,12 @@
+ db | schemaname | tablename | tups | pages | otta | tbloat | wastedpages | wastedbytes | wastedsize | iname | itups | ipages | iotta | ibloat | wastedipages | wastedibytes | wastedisize | totalwastedbytes
+----------+------------+---------------------------------+---------+-------+-------+--------+-------------+-------------+---------------+---------------------------+---------+--------+-------+--------+--------------+--------------+--------------+------------------
+ postgres | pg_catalog | pg_proc_oid_index | 3202 | 11 | 0 | 0.0 | 11 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_proc_proname_args_nsp_index | 3202 | 32 | 0 | 0.0 | 32 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_attribute_relid_attnam_index | 2971 | 15 | 0 | 0.0 | 15 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_description_o_c_o_index | 5078 | 27 | 0 | 0.0 | 27 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_depend_depender_index | 8814 | 43 | 0 | 0.0 | 43 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_depend_reference_index | 8814 | 53 | 0 | 0.0 | 53 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0
+ postgres | pg_catalog | pg_depend | 8814 | 65 | 65 | 0.0 | 0 | 0 | 0 bytes | pg_depend_reference_index | 8814 | 53 | 40 | 1.3 | 13 | 106496 | 106496 bytes | 106496
+ postgres | pg_toast | pg_toast_2618 | 283 | 63 | 0 | 0.0 | 63 | 0 | 0 bytes | pg_toast_2618_index | 0 | 1 | 0 | 0.0 | 1 | 0 | 0 bytes | 0
+ postgres | public | pgbench_accounts | 5000000 | 81968 | 80764 | 1.0 | 1204 | 9863168 | 9863168 bytes | pgbench_accounts_pkey | 5000000 | 13713 | 66692 | 0.2 | 0 | 0 | 0 bytes | 9863168
+ postgres | public | pgbench_accounts_pkey | 5000000 | 13713 | 0 | 0.0 | 13713 | 0 | 0 bytes | ? | 0 | 0 | 0 | 0.0 | 0 | 0 | 0 bytes | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/catalog_relations.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/catalog_relations.txt
new file mode 100644
index 000000000..cd05e89af
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/catalog_relations.txt
@@ -0,0 +1,6 @@
+ relkind | count | size
+---------+-------+---------
+ r | 66 | 3424256
+ v | 137 | 0
+ i | 155 | 3678208
+ t | 38 | 548864 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/checkpoints.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/checkpoints.txt
new file mode 100644
index 000000000..851ff1320
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/checkpoints.txt
@@ -0,0 +1,3 @@
+ checkpoints_timed | checkpoints_req | checkpoint_write_time | checkpoint_sync_time | buffers_checkpoint_bytes | buffers_clean_bytes | maxwritten_clean | buffers_backend_bytes | buffers_backend_fsync | buffers_alloc_bytes
+-------------------+-----------------+-----------------------+----------------------+--------------------------+---------------------+------------------+-----------------------+-----------------------+--------------------
+ 1814 | 16 | 167 | 47 | 32768 | 0 | 0 | 0 | 0 | 27295744 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_conflicts.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_conflicts.txt
new file mode 100644
index 000000000..34229182a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_conflicts.txt
@@ -0,0 +1,4 @@
+ datname | confl_tablespace | confl_lock | confl_snapshot | confl_bufferpin | confl_deadlock
+------------+------------------+------------+----------------+-----------------+----------------
+ postgres | 0 | 0 | 0 | 0 | 0
+ production | 0 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_locks.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_locks.txt
new file mode 100644
index 000000000..8d92f314d
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_locks.txt
@@ -0,0 +1,7 @@
+ datname | mode | granted | locks_count
+------------+--------------------------+---------+-------------
+ postgres | AccessShareLock | t | 99
+ postgres | RowShareLock | t | 99
+ postgres | RowExclusiveLock | t | 99
+ production | ShareUpdateExclusiveLock | t | 99
+ production | ShareLock | f | 99 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_size.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_size.txt
new file mode 100644
index 000000000..367cb6f20
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_size.txt
@@ -0,0 +1,4 @@
+ datname | size
+------------+--------
+ postgres | 8758051
+ production | 8602115 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_stats.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_stats.txt
new file mode 100644
index 000000000..d3ce24c6e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/database_stats.txt
@@ -0,0 +1,4 @@
+datname | numbackends | datconnlimit | xact_commit | xact_rollback | blks_read_bytes | blks_hit_bytes | tup_returned | tup_fetched | tup_inserted | tup_updated | tup_deleted | conflicts | temp_files | temp_bytes | deadlocks
+------------+-------------+--------------+-------------+---------------+-----------------+----------------+--------------+-------------+--------------+-------------+-------------+-----------+------------+------------+-----------
+postgres | 3 | 30 | 1438660 | 70 | 3252 | 1221125 | 13207245 | 359833 | 0 | 0 | 0 | 0 | 0 | 0 | 0
+production | 1 | -1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-false.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-false.txt
new file mode 100644
index 000000000..6cb2222d3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-false.txt
@@ -0,0 +1,3 @@
+ is_superuser
+--------------
+ f \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-true.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-true.txt
new file mode 100644
index 000000000..84cd8088e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/is_super_user-true.txt
@@ -0,0 +1,3 @@
+ is_superuser
+--------------
+ t \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt
new file mode 100644
index 000000000..b684948e3
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/pg_is_in_recovery-true.txt
@@ -0,0 +1,3 @@
+ pg_is_in_recovery
+-------------------
+ t \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/queryable_database_list.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/queryable_database_list.txt
new file mode 100644
index 000000000..b3f2af4f1
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/queryable_database_list.txt
@@ -0,0 +1,2 @@
+ datname
+--------- \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_slot_files.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_slot_files.txt
new file mode 100644
index 000000000..59fcd8fe4
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_slot_files.txt
@@ -0,0 +1,3 @@
+ slot_name | slot_type | replslot_wal_keep | replslot_files
+-----------+-----------+-------------------+----------------
+ ocean | physical | 0 | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt
new file mode 100644
index 000000000..98c3cd99e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_delta.txt
@@ -0,0 +1,5 @@
+ application_name | sent_delta | write_delta | flush_delta | replay_delta
+------------------+------------+-------------+-------------+--------------
+ walreceiver | 1 | 1 | 1 | 1
+ walreceiver | 1 | 1 | 1 | 1
+ phys-standby2 | 0 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt
new file mode 100644
index 000000000..c2e253790
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/replication_standby_app_wal_lag.txt
@@ -0,0 +1,5 @@
+ application_name | write_lag | flush_lag | replay_lag
+------------------+-----------+-----------+------------
+ walreceiver | 1 | 1 | 1
+ walreceiver | 1 | 1 | 1
+ phys-standby2 | 0 | 0 | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_connections_state.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_connections_state.txt
new file mode 100644
index 000000000..7387f4dfb
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_connections_state.txt
@@ -0,0 +1,8 @@
+ state | count
+-------------------------------+-------
+ active | 1
+ idle | 14
+ idle in transaction | 7
+ idle in transaction (aborted) | 1
+ fastpath function call | 1
+ disabled | 1 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_current_connections.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_current_connections.txt
new file mode 100644
index 000000000..065188d97
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_current_connections.txt
@@ -0,0 +1,3 @@
+ sum
+-----
+ 3 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_version_num.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_version_num.txt
new file mode 100644
index 000000000..18d769b32
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/server_version_num.txt
@@ -0,0 +1,3 @@
+ server_version_num
+--------------------
+ 140004 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_connections.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_connections.txt
new file mode 100644
index 000000000..4d59df214
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_connections.txt
@@ -0,0 +1,3 @@
+ current_setting
+-----------------
+ 100 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_locks_held.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_locks_held.txt
new file mode 100644
index 000000000..e72bd71aa
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/settings_max_locks_held.txt
@@ -0,0 +1,3 @@
+ ?column?
+----------
+ 6400 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt
new file mode 100644
index 000000000..db73fa4e6
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_indexes_db_postgres.txt
@@ -0,0 +1,11 @@
+ datname | schemaname | relname | indexrelname | idx_scan | idx_tup_read | idx_tup_fetch | size
+----------+------------+------------------+-------------------------+----------+--------------+---------------+-----------
+ postgres | public | pgbench_branches | pgbench_branches_pkey | 0 | 0 | 0 | 16384
+ postgres | public | pgbench_tellers | pgbench_tellers_pkey | 0 | 0 | 0 | 32768
+ postgres | public | pgbench_accounts | pgbench_accounts_pkey | 3 | 5000000 | 0 | 112336896
+ postgres | public | myaccounts | myaccounts_pkey | 0 | 0 | 0 | 8192
+ postgres | public | myaccounts | myaccounts_username_key | 0 | 0 | 0 | 8192
+ postgres | public | myaccounts | myaccounts_email_key | 0 | 0 | 0 | 8192
+ postgres | myschema | myaccounts | myaccounts_pkey | 0 | 0 | 0 | 8192
+ postgres | myschema | myaccounts | myaccounts_username_key | 0 | 0 | 0 | 8192
+ postgres | myschema | myaccounts | myaccounts_email_key | 0 | 0 | 0 | 8192 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt
new file mode 100644
index 000000000..f6f9edb04
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/stat_user_tables_db_postgres.txt
@@ -0,0 +1,6 @@
+ datname | schemaname | relname | seq_scan | seq_tup_read | idx_scan | idx_tup_fetch | n_tup_ins | n_tup_upd | n_tup_del | n_tup_hot_upd | n_live_tup | n_dead_tup | last_vacuum | last_autovacuum | last_analyze | last_autoanalyze | vacuum_count | autovacuum_count | analyze_count | autoanalyze_count | total_relation_size
+----------+------------+------------------+----------+--------------+----------+---------------+-----------+-----------+-----------+---------------+------------+------------+---------------+-----------------+---------------+------------------+--------------+------------------+---------------+-------------------+---------------------
+ postgres | public | pgbench_history | 0 | 0 | | | 0 | 0 | 0 | 0 | 0 | 0 | 377149.085671 | | 377149.085536 | | 1 | 0 | 1 | 0 | 0
+ postgres | public | pgbench_accounts | 2 | 5000000 | 99955 | 99955 | 5000000 | 0 | 0 | 0 | 5000048 | 1000048 | 377149.232856 | | 377149.097205 | | 1 | 0 | 1 | 0 | 784031744
+ postgres | public | pgbench_tellers | 1 | 500 | 0 | 0 | 500 | 0 | 0 | 0 | 500 | 0 | 371719.262166 | | 377149.824095 | | 6 | 0 | 1 | 0 | 90112
+ postgres | public | pgbench_branches | 6 | 300 | 0 | 0 | 50 | 0 | 0 | 0 | 50 | 0 | 371719.262495 | | 377149.826260 | | 6 | 0 | 1 | 0 | 57344 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt
new file mode 100644
index 000000000..f52b1806b
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/statio_user_tables_db_postgres.txt
@@ -0,0 +1,6 @@
+ datname | schemaname | relname | heap_blks_read_bytes | heap_blks_hit_bytes | idx_blks_read_bytes | idx_blks_hit_bytes | toast_blks_read_bytes | toast_blks_hit_bytes | tidx_blks_read_bytes | tidx_blks_hit_bytes
+----------+------------+------------------+----------------------+---------------------+---------------------+--------------------+-----------------------+----------------------+----------------------+---------------------
+ postgres | public | pgbench_tellers | 623828992 | 491937792 | 101433344 | 0 | | | |
+ postgres | public | pgbench_history | 0 | 0 | | | | | |
+ postgres | public | pgbench_accounts | 1803882668032 | 224484753408 | 973310976000 | 7138635948032 | | | |
+ postgres | public | pgbench_branches | 507150336 | 304316416 | 101425152 | 101441536 | | | | \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/table_columns_stats.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/table_columns_stats.txt
new file mode 100644
index 000000000..645d847d0
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/table_columns_stats.txt
@@ -0,0 +1,10 @@
+ datname | schemaname | relname | attname | typname | null_percent | distinct
+----------+------------+------------------+----------+---------+--------------+----------
+ postgres | public | pgbench_accounts | abalance | int4 | 0 | 1
+ postgres | public | pgbench_accounts | aid | int4 | 0 | 5e+06
+ postgres | public | pgbench_accounts | bid | int4 | 0 | 50
+ postgres | public | pgbench_accounts | filler | bpchar | 0 | 1
+ postgres | public | pgbench_tellers | bid | int4 | 0 | 50
+ postgres | public | pgbench_tellers | filler | bpchar | 100 | 0
+ postgres | public | pgbench_tellers | tbalance | int4 | 0 | 1
+ postgres | public | pgbench_tellers | tid | int4 | 0 | 500 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/txid_wraparound.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/txid_wraparound.txt
new file mode 100644
index 000000000..9e05f12ab
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/txid_wraparound.txt
@@ -0,0 +1,3 @@
+ oldest_current_xid | percent_towards_wraparound | percent_towards_emergency_autovacuum
+--------------------+----------------------------+-----------------------------------
+ 9 | 0 | 0 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/uptime.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/uptime.txt
new file mode 100644
index 000000000..95464bc3c
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/uptime.txt
@@ -0,0 +1,3 @@
+ extract
+---------------
+ 499906.075943 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_archive_files.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_archive_files.txt
new file mode 100644
index 000000000..8b7a86261
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_archive_files.txt
@@ -0,0 +1,3 @@
+ wal_archive_files_ready_count | wal_archive_files_done_count
+-------------------------------+------------------------------
+ 1 | 1 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_files.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_files.txt
new file mode 100644
index 000000000..f18aefdcd
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_files.txt
@@ -0,0 +1,3 @@
+ wal_recycled_files | wal_written_files
+--------------------+-------------------
+ 0 | 1 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_writes.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_writes.txt
new file mode 100644
index 000000000..3bb8f9e95
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/wal_writes.txt
@@ -0,0 +1,3 @@
+ wal_writes
+------------
+ 24103144 \ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/xact_query_running_time.txt b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/xact_query_running_time.txt
new file mode 100644
index 000000000..52617f748
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/postgres/testdata/v14.4/xact_query_running_time.txt
@@ -0,0 +1,10 @@
+ datname | state | xact_running_time | query_running_time
+----------+---------------------+-------------------+--------------------
+ some_db | idle in transaction | 574.530219 | 574.315061
+ some_db | idle in transaction | 574.867167 | 574.330322
+ postgres | active | 0.000000 | 0.000000
+ some_db | idle in transaction | 574.807256 | 574.377105
+ some_db | idle in transaction | 574.680244 | 574.357246
+ some_db | idle in transaction | 574.800283 | 574.330328
+ some_db | idle in transaction | 574.396730 | 574.290165
+ some_db | idle in transaction | 574.665428 | 574.337164 \ No newline at end of file