Diffstat
l---------  src/go/collectors/go.d.plugin/modules/clickhouse/README.md |    1
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/charts.go | 1005
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse.go |  123
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse_test.go |  315
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/collect.go |   96
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_async_metrics.go |   61
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_disks.go |   82
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_events.go |  120
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_metrics.go |   75
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_parts.go |   98
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_processes.go |   29
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/config_schema.json |  177
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/init.go |   21
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/integrations/clickhouse.md |  333
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/metadata.yaml |  624
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.json |   20
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.yaml |   17
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_longest_query_time.csv |    2
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_async_metrics.csv |  434
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_disks.csv |    2
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_events.csv |  102
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_metrics.csv |  283
-rw-r--r--  src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_parts.csv |    6
23 files changed, 4026 insertions, 0 deletions
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/README.md b/src/go/collectors/go.d.plugin/modules/clickhouse/README.md
new file mode 120000
index 000000000..078a1eee2
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/README.md
@@ -0,0 +1 @@
+integrations/clickhouse.md
\ No newline at end of file
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/charts.go b/src/go/collectors/go.d.plugin/modules/clickhouse/charts.go
new file mode 100644
index 000000000..cefcca1e2
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/charts.go
@@ -0,0 +1,1005 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/netdata/go/go.d.plugin/agent/module"
+)
+
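+// Chart priorities: each prio* constant below offsets module.Priority via iota,
+// so charts appear on the dashboard in the order they are declared here.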
+const (
+ prioConnections = module.Priority + iota
+
+ prioSlowReads
+ prioReadBackoff
+
+ prioMemoryUsage
+
+ prioDiskSpaceUsage
+
+ prioRunningQueries
+ prioQueriesPreempted
+ prioQueries
+ prioSelectQueries
+ prioInsertQueries
+ prioQueriesMemoryLimitExceeded
+
+ prioLongestRunningQueryTime
+ prioQueriesLatency
+ prioSelectQueriesLatency
+ prioInsertQueriesLatency
+
+ prioIO
+ prioIOPS
+ prioIOErrors
+ prioIOSeeks
+ prioIOFileOpens
+
+ prioDatabaseTableSize
+ prioDatabaseTableParts
+ prioDatabaseTableRows
+
+ prioReplicatedPartsCurrentActivity
+ prioReplicasMaxAbsoluteDelay
+ prioReadOnlyReplica
+ prioReplicatedDataLoss
+ prioReplicatedPartFetches
+ prioReplicatedPartFetchesOfMerged
+ prioReplicatedPartMerges
+
+ prioInsertedBytes
+ prioInsertedRows
+ prioRejectedInserts
+ prioDelayedInserts
+ prioDelayedInsertsThrottleTime
+
+ prioSelectedBytes
+ prioSelectedRows
+ prioSelectedParts
+ prioSelectedRanges
+ prioSelectedMarks
+
+ prioMerges
+ prioMergesLatency
+ prioMergedUncompressedBytes
+ prioMergedRows
+
+ prioMergeTreeDataWriterRows
+ prioMergeTreeDataWriterUncompressedBytes
+ prioMergeTreeDataWriterCompressedBytes
+
+ prioUncompressedCacheRequests
+ prioMarkCacheRequests
+
+ prioMaxPartCountForPartition
+ prioParts
+
+ prioDistributedSend
+ prioDistributedConnectionTries
+ prioDistributedConnectionFailTry
+ prioDistributedConnectionFailAtAll
+
+ prioDistributedFilesToInsert
+ prioDistributedRejectedInserts
+ prioDistributedDelayedInserts
+ prioDistributedDelayedInsertsMilliseconds
+ prioDistributedSyncInsertionTimeoutExceeded
+ prioDistributedAsyncInsertionFailures
+
+ prioUptime
+)
+
+var chCharts = module.Charts{
+ chartConnections.Copy(),
+
+ chartMemoryUsage.Copy(),
+
+ chartSlowReads.Copy(),
+ chartReadBackoff.Copy(),
+
+ chartRunningQueries.Copy(),
+ chartQueries.Copy(),
+ chartSelectQueries.Copy(),
+ chartInsertQueries.Copy(),
+ chartQueriesPreempted.Copy(),
+ chartQueriesMemoryLimitExceeded.Copy(),
+
+ chartLongestRunningQueryTime.Copy(),
+ chartQueriesLatency.Copy(),
+ chartSelectQueriesLatency.Copy(),
+ chartInsertQueriesLatency.Copy(),
+
+ chartFileDescriptorIO.Copy(),
+ chartFileDescriptorIOPS.Copy(),
+ chartFileDescriptorIOErrors.Copy(),
+ chartIOSeeks.Copy(),
+ chartIOFileOpens.Copy(),
+
+ chartReplicatedPartsActivity.Copy(),
+ chartReplicasMaxAbsoluteDelay.Copy(),
+ chartReadonlyReplica.Copy(),
+ chartReplicatedDataLoss.Copy(),
+ chartReplicatedPartFetches.Copy(),
+ chartReplicatedPartMerges.Copy(),
+ chartReplicatedPartFetchesOfMerged.Copy(),
+
+ chartInsertedRows.Copy(),
+ chartInsertedBytes.Copy(),
+ chartRejectedInserts.Copy(),
+ chartDelayedInserts.Copy(),
+ chartDelayedInsertsThrottleTime.Copy(),
+
+ chartSelectedRows.Copy(),
+ chartSelectedBytes.Copy(),
+ chartSelectedParts.Copy(),
+ chartSelectedRanges.Copy(),
+ chartSelectedMarks.Copy(),
+
+ chartMerges.Copy(),
+ chartMergesLatency.Copy(),
+ chartMergedUncompressedBytes.Copy(),
+ chartMergedRows.Copy(),
+
+ chartMergeTreeDataWriterInsertedRows.Copy(),
+ chartMergeTreeDataWriterUncompressedBytes.Copy(),
+ chartMergeTreeDataWriterCompressedBytes.Copy(),
+
+ chartUncompressedCacheRequests.Copy(),
+ chartMarkCacheRequests.Copy(),
+
+ chartMaxPartCountForPartition.Copy(),
+ chartPartsCount.Copy(),
+
+ chartDistributedConnections.Copy(),
+ chartDistributedConnectionAttempts.Copy(),
+ chartDistributedConnectionFailRetries.Copy(),
+ chartDistributedConnectionFailExhaustedRetries.Copy(),
+
+ chartDistributedFilesToInsert.Copy(),
+ chartDistributedRejectedInserts.Copy(),
+ chartDistributedDelayedInserts.Copy(),
+ chartDistributedDelayedInsertsLatency.Copy(),
+ chartDistributedSyncInsertionTimeoutExceeded.Copy(),
+ chartDistributedAsyncInsertionFailures.Copy(),
+
+ chartUptime.Copy(),
+}
+
+var (
+ chartConnections = module.Chart{
+ ID: "connections",
+ Title: "Connections",
+ Units: "connections",
+ Fam: "conns",
+ Ctx: "clickhouse.connections",
+ Priority: prioConnections,
+ Dims: module.Dims{
+ {ID: "metrics_TCPConnection", Name: "tcp"},
+ {ID: "metrics_HTTPConnection", Name: "http"},
+ {ID: "metrics_MySQLConnection", Name: "mysql"},
+ {ID: "metrics_PostgreSQLConnection", Name: "postgresql"},
+ {ID: "metrics_InterserverConnection", Name: "interserver"},
+ },
+ }
+)
+
+var (
+ chartSlowReads = module.Chart{
+ ID: "slow_reads",
+ Title: "Slow reads from a file",
+ Units: "reads/s",
+ Fam: "slow reads",
+ Ctx: "clickhouse.slow_reads",
+ Priority: prioSlowReads,
+ Dims: module.Dims{
+ {ID: "events_SlowRead", Name: "slow"},
+ },
+ }
+ chartReadBackoff = module.Chart{
+ ID: "read_backoff",
+ Title: "Read backoff events",
+ Units: "events/s",
+ Fam: "slow reads",
+ Ctx: "clickhouse.read_backoff",
+ Priority: prioReadBackoff,
+ Dims: module.Dims{
+ {ID: "events_ReadBackoff", Name: "read_backoff"},
+ },
+ }
+)
+
+var (
+ chartMemoryUsage = module.Chart{
+ ID: "memory_usage",
+ Title: "Memory usage",
+ Units: "bytes",
+ Fam: "mem",
+ Ctx: "clickhouse.memory_usage",
+ Priority: prioMemoryUsage,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "metrics_MemoryTracking", Name: "used"},
+ },
+ }
+)
+
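+// Per-disk chart templates: the '%s' placeholders in chart and dim IDs are
+// filled with the disk name by addDiskCharts when a new disk is first seen.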
+var diskChartsTmpl = module.Charts{
+ diskSpaceUsageChartTmpl.Copy(),
+}
+
+var (
+ diskSpaceUsageChartTmpl = module.Chart{
+ ID: "disk_%s_space_usage",
+ Title: "Disk space usage",
+ Units: "bytes",
+ Fam: "disk space",
+ Ctx: "clickhouse.disk_space_usage",
+ Type: module.Stacked,
+ Priority: prioDiskSpaceUsage,
+ Dims: module.Dims{
+ {ID: "disk_%s_free_space_bytes", Name: "free"},
+ {ID: "disk_%s_used_space_bytes", Name: "used"},
+ },
+ }
+)
+
+var (
+ chartRunningQueries = module.Chart{
+ ID: "running_queries",
+ Title: "Running queries",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "clickhouse.running_queries",
+ Priority: prioRunningQueries,
+ Dims: module.Dims{
+ {ID: "metrics_Query", Name: "running"},
+ },
+ }
+ chartQueriesPreempted = module.Chart{
+ ID: "queries_preempted",
+ Title: "Queries waiting due to priority",
+ Units: "queries",
+ Fam: "queries",
+ Ctx: "clickhouse.queries_preempted",
+ Priority: prioQueriesPreempted,
+ Dims: module.Dims{
+ {ID: "metrics_QueryPreempted", Name: "preempted"},
+ },
+ }
+ chartQueries = module.Chart{
+ ID: "queries",
+ Title: "Queries",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "clickhouse.queries",
+ Priority: prioQueries,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "events_SuccessfulQuery", Name: "successful", Algo: module.Incremental},
+ {ID: "events_FailedQuery", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartSelectQueries = module.Chart{
+ ID: "select_queries",
+ Title: "Select queries",
+ Units: "selects/s",
+ Fam: "queries",
+ Ctx: "clickhouse.select_queries",
+ Priority: prioSelectQueries,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "events_SuccessfulSelectQuery", Name: "successful", Algo: module.Incremental},
+ {ID: "events_FailedSelectQuery", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartInsertQueries = module.Chart{
+ ID: "insert_queries",
+ Title: "Insert queries",
+ Units: "inserts/s",
+ Fam: "queries",
+ Ctx: "clickhouse.insert_queries",
+ Priority: prioInsertQueries,
+ Type: module.Stacked,
+ Dims: module.Dims{
+ {ID: "events_SuccessfulInsertQuery", Name: "successful", Algo: module.Incremental},
+ {ID: "events_FailedInsertQuery", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartQueriesMemoryLimitExceeded = module.Chart{
+ ID: "queries_memory_limit_exceeded",
+ Title: "Memory limit exceeded for query",
+ Units: "queries/s",
+ Fam: "queries",
+ Ctx: "clickhouse.queries_memory_limit_exceeded",
+ Priority: prioQueriesMemoryLimitExceeded,
+ Dims: module.Dims{
+ {ID: "events_QueryMemoryLimitExceeded", Name: "mem_limit_exceeded"},
+ },
+ }
+)
+
+var (
+ chartLongestRunningQueryTime = module.Chart{
+ ID: "longest_running_query_time",
+ Title: "Longest running query time",
+ Units: "seconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.longest_running_query_time",
+ Priority: prioLongestRunningQueryTime,
+ Dims: module.Dims{
+ {ID: "LongestRunningQueryTime", Name: "longest_query_time", Div: precision},
+ },
+ }
+ chartQueriesLatency = module.Chart{
+ ID: "queries_latency",
+ Title: "Queries latency",
+ Units: "microseconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.queries_latency",
+ Priority: prioQueriesLatency,
+ Dims: module.Dims{
+ {ID: "events_QueryTimeMicroseconds", Name: "queries_time", Algo: module.Incremental},
+ },
+ }
+ chartSelectQueriesLatency = module.Chart{
+ ID: "select_queries_latency",
+ Title: "Select queries latency",
+ Units: "microseconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.select_queries_latency",
+ Priority: prioSelectQueriesLatency,
+ Dims: module.Dims{
+ {ID: "events_SelectQueryTimeMicroseconds", Name: "selects_time", Algo: module.Incremental},
+ },
+ }
+ chartInsertQueriesLatency = module.Chart{
+ ID: "insert_queries_latency",
+ Title: "Insert queries latency",
+ Units: "microseconds",
+ Fam: "query latency",
+ Ctx: "clickhouse.insert_queries_latency",
+ Priority: prioInsertQueriesLatency,
+ Dims: module.Dims{
+ {ID: "events_InsertQueryTimeMicroseconds", Name: "inserts_time", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartFileDescriptorIO = module.Chart{
+ ID: "file_descriptor_io",
+ Title: "Read and written data",
+ Units: "bytes/s",
+ Fam: "io",
+ Ctx: "clickhouse.io",
+ Priority: prioIO,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_ReadBufferFromFileDescriptorReadBytes", Name: "reads", Algo: module.Incremental},
+ {ID: "events_WriteBufferFromFileDescriptorWriteBytes", Name: "writes", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ chartFileDescriptorIOPS = module.Chart{
+ ID: "file_descriptor_iops",
+ Title: "Read and write operations",
+ Units: "ops/s",
+ Fam: "io",
+ Ctx: "clickhouse.iops",
+ Priority: prioIOPS,
+ Dims: module.Dims{
+ {ID: "events_ReadBufferFromFileDescriptorRead", Name: "reads", Algo: module.Incremental},
+ {ID: "events_WriteBufferFromFileDescriptorWrite", Name: "writes", Mul: -1, Algo: module.Incremental},
+ },
+ }
+ chartFileDescriptorIOErrors = module.Chart{
+ ID: "file_descriptor_io_errors",
+ Title: "Read and write errors",
+ Units: "errors/s",
+ Fam: "io",
+ Ctx: "clickhouse.io_errors",
+ Priority: prioIOErrors,
+ Dims: module.Dims{
+ {ID: "events_ReadBufferFromFileDescriptorReadFailed", Name: "read", Algo: module.Incremental},
+ {ID: "events_WriteBufferFromFileDescriptorWriteFailed", Name: "write", Algo: module.Incremental},
+ },
+ }
+ chartIOSeeks = module.Chart{
+ ID: "io_seeks",
+ Title: "lseek function calls",
+ Units: "ops/s",
+ Fam: "io",
+ Ctx: "clickhouse.io_seeks",
+ Priority: prioIOSeeks,
+ Dims: module.Dims{
+ {ID: "events_Seek", Name: "lseek", Algo: module.Incremental},
+ },
+ }
+ chartIOFileOpens = module.Chart{
+ ID: "io_file_opens",
+ Title: "File opens",
+ Units: "ops/s",
+ Fam: "io",
+ Ctx: "clickhouse.io_file_opens",
+ Priority: prioIOFileOpens,
+ Dims: module.Dims{
+ {ID: "events_FileOpen", Name: "file_open", Algo: module.Incremental},
+ },
+ }
+)
+
+var tableChartsTmpl = module.Charts{
+ tableSizeChartTmpl.Copy(),
+ tablePartsChartTmpl.Copy(),
+ tableRowsChartTmpl.Copy(),
+}
+
+var (
+ tableSizeChartTmpl = module.Chart{
+ ID: "table_%s_database_%s_size",
+ Title: "Table size",
+ Units: "bytes",
+ Fam: "tables",
+ Ctx: "clickhouse.database_table_size",
+ Type: module.Area,
+ Priority: prioDatabaseTableSize,
+ Dims: module.Dims{
+ {ID: "table_%s_database_%s_size_bytes", Name: "size"},
+ },
+ }
+ tablePartsChartTmpl = module.Chart{
+ ID: "table_%s_database_%s_parts",
+ Title: "Table parts",
+ Units: "parts",
+ Fam: "tables",
+ Ctx: "clickhouse.database_table_parts",
+ Priority: prioDatabaseTableParts,
+ Dims: module.Dims{
+ {ID: "table_%s_database_%s_parts", Name: "parts"},
+ },
+ }
+ tableRowsChartTmpl = module.Chart{
+ ID: "table_%s_database_%s_rows",
+ Title: "Table rows",
+ Units: "rows",
+ Fam: "tables",
+ Ctx: "clickhouse.database_table_rows",
+ Priority: prioDatabaseTableRows,
+ Dims: module.Dims{
+ {ID: "table_%s_database_%s_rows", Name: "rows"},
+ },
+ }
+)
+
+var (
+ chartReplicatedPartsActivity = module.Chart{
+ ID: "replicated_parts_activity",
+ Title: "Replicated parts current activity",
+ Units: "parts",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_parts_current_activity",
+ Priority: prioReplicatedPartsCurrentActivity,
+ Dims: module.Dims{
+ {ID: "metrics_ReplicatedFetch", Name: "fetch"},
+ {ID: "metrics_ReplicatedSend", Name: "send"},
+ {ID: "metrics_ReplicatedChecks", Name: "check"},
+ },
+ }
+ chartReplicasMaxAbsoluteDelay = module.Chart{
+ ID: "replicas_max_absolute_delay",
+ Title: "Replicas max absolute delay",
+ Units: "seconds",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicas_max_absolute_delay",
+ Priority: prioReplicasMaxAbsoluteDelay,
+ Dims: module.Dims{
+ {ID: "async_metrics_ReplicasMaxAbsoluteDelay", Name: "replication_delay", Div: precision},
+ },
+ }
+ chartReadonlyReplica = module.Chart{
+ ID: "readonly_replica",
+ Title: "Replicated tables in readonly state",
+ Units: "tables",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_readonly_tables",
+ Priority: prioReadOnlyReplica,
+ Dims: module.Dims{
+ {ID: "metrics_ReadonlyReplica", Name: "read_only"},
+ },
+ }
+ chartReplicatedDataLoss = module.Chart{
+ ID: "replicated_data_loss",
+ Title: "Replicated data loss",
+ Units: "events/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_data_loss",
+ Priority: prioReplicatedDataLoss,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedDataLoss", Name: "data_loss", Algo: module.Incremental},
+ },
+ }
+ chartReplicatedPartFetches = module.Chart{
+ ID: "replicated_part_fetches",
+ Title: "Replicated part fetches",
+ Units: "fetches/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_part_fetches",
+ Priority: prioReplicatedPartFetches,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedPartFetches", Name: "successful", Algo: module.Incremental},
+ {ID: "events_ReplicatedPartFailedFetches", Name: "failed", Algo: module.Incremental},
+ },
+ }
+ chartReplicatedPartFetchesOfMerged = module.Chart{
+ ID: "replicated_part_fetches_of_merged",
+ Title: "Replicated part fetches of merged",
+ Units: "fetches/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_part_fetches_of_merged",
+ Priority: prioReplicatedPartFetchesOfMerged,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedPartFetchesOfMerged", Name: "merged", Algo: module.Incremental},
+ },
+ }
+ chartReplicatedPartMerges = module.Chart{
+ ID: "replicated_part_merges",
+ Title: "Replicated part merges",
+ Units: "merges/s",
+ Fam: "replicas",
+ Ctx: "clickhouse.replicated_part_merges",
+ Priority: prioReplicatedPartMerges,
+ Dims: module.Dims{
+ {ID: "events_ReplicatedPartMerges", Name: "merges", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartInsertedBytes = module.Chart{
+ ID: "inserted_bytes",
+ Title: "Inserted data",
+ Units: "bytes/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.inserted_bytes",
+ Priority: prioInsertedBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_InsertedBytes", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartInsertedRows = module.Chart{
+ ID: "inserted_rows",
+ Title: "Inserted rows",
+ Units: "rows/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.inserted_rows",
+ Priority: prioInsertedRows,
+ Dims: module.Dims{
+ {ID: "events_InsertedRows", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartRejectedInserts = module.Chart{
+ ID: "rejected_inserts",
+ Title: "Rejected inserts",
+ Units: "inserts/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.rejected_inserts",
+ Priority: prioRejectedInserts,
+ Dims: module.Dims{
+ {ID: "events_RejectedInserts", Name: "rejected", Algo: module.Incremental},
+ },
+ }
+ chartDelayedInserts = module.Chart{
+ ID: "delayed_inserts",
+ Title: "Delayed inserts",
+ Units: "inserts/s",
+ Fam: "inserts",
+ Ctx: "clickhouse.delayed_inserts",
+ Priority: prioDelayedInserts,
+ Dims: module.Dims{
+ {ID: "events_DelayedInserts", Name: "delayed", Algo: module.Incremental},
+ },
+ }
+ chartDelayedInsertsThrottleTime = module.Chart{
+ ID: "delayed_inserts_throttle_time",
+ Title: "Delayed inserts throttle time",
+ Units: "milliseconds",
+ Fam: "inserts",
+ Ctx: "clickhouse.delayed_inserts_throttle_time",
+ Priority: prioDelayedInsertsThrottleTime,
+ Dims: module.Dims{
+ {ID: "events_DelayedInsertsMilliseconds", Name: "delayed_inserts_throttle_time", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartSelectedBytes = module.Chart{
+ ID: "selected_bytes",
+ Title: "Selected data",
+ Units: "bytes/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_bytes",
+ Priority: prioSelectedBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_SelectedBytes", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedRows = module.Chart{
+ ID: "selected_rows",
+ Title: "Selected rows",
+ Units: "rows/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_rows",
+ Priority: prioSelectedRows,
+ Dims: module.Dims{
+ {ID: "events_SelectedRows", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedParts = module.Chart{
+ ID: "selected_parts",
+ Title: "Selected parts",
+ Units: "parts/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_parts",
+ Priority: prioSelectedParts,
+ Dims: module.Dims{
+ {ID: "events_SelectedParts", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedRanges = module.Chart{
+ ID: "selected_ranges",
+ Title: "Selected ranges",
+ Units: "ranges/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_ranges",
+ Priority: prioSelectedRanges,
+ Dims: module.Dims{
+ {ID: "events_SelectedRanges", Name: "selected", Algo: module.Incremental},
+ },
+ }
+ chartSelectedMarks = module.Chart{
+ ID: "selected_marks",
+ Title: "Selected marks",
+ Units: "marks/s",
+ Fam: "selects",
+ Ctx: "clickhouse.selected_marks",
+ Priority: prioSelectedMarks,
+ Dims: module.Dims{
+ {ID: "events_SelectedMarks", Name: "selected", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartMerges = module.Chart{
+ ID: "merges",
+ Title: "Merge operations",
+ Units: "ops/s",
+ Fam: "merges",
+ Ctx: "clickhouse.merges",
+ Priority: prioMerges,
+ Dims: module.Dims{
+ {ID: "events_Merge", Name: "merge", Algo: module.Incremental},
+ },
+ }
+ chartMergesLatency = module.Chart{
+ ID: "merges_latency",
+ Title: "Time spent for background merges",
+ Units: "milliseconds",
+ Fam: "merges",
+ Ctx: "clickhouse.merges_latency",
+ Priority: prioMergesLatency,
+ Dims: module.Dims{
+ {ID: "events_MergesTimeMilliseconds", Name: "merges_time", Algo: module.Incremental},
+ },
+ }
+ chartMergedUncompressedBytes = module.Chart{
+ ID: "merged_uncompressed_bytes",
+ Title: "Uncompressed data read for background merges",
+ Units: "bytes/s",
+ Fam: "merges",
+ Ctx: "clickhouse.merged_uncompressed_bytes",
+ Priority: prioMergedUncompressedBytes,
+ Type: module.Area,
+ Dims: module.Dims{
+ {ID: "events_MergedUncompressedBytes", Name: "merged_uncompressed", Algo: module.Incremental},
+ },
+ }
+ chartMergedRows = module.Chart{
+ ID: "merged_rows",
+ Title: "Merged rows",
+ Units: "rows/s",
+ Fam: "merges",
+ Ctx: "clickhouse.merged_rows",
+ Priority: prioMergedRows,
+ Dims: module.Dims{
+ {ID: "events_MergedRows", Name: "merged", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartMergeTreeDataWriterInsertedRows = module.Chart{
+ ID: "merge_tree_data_writer_inserted_rows",
+ Title: "Rows INSERTed to MergeTree tables",
+ Units: "rows/s",
+ Fam: "merge tree",
+ Ctx: "clickhouse.merge_tree_data_writer_inserted_rows",
+ Priority: prioMergeTreeDataWriterRows,
+ Dims: module.Dims{
+ {ID: "events_MergeTreeDataWriterRows", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartMergeTreeDataWriterUncompressedBytes = module.Chart{
+ ID: "merge_tree_data_writer_uncompressed_bytes",
+ Title: "Data INSERTed to MergeTree tables",
+ Units: "bytes/s",
+ Fam: "merge tree",
+ Ctx: "clickhouse.merge_tree_data_writer_uncompressed_bytes",
+ Type: module.Area,
+ Priority: prioMergeTreeDataWriterUncompressedBytes,
+ Dims: module.Dims{
+ {ID: "events_MergeTreeDataWriterUncompressedBytes", Name: "inserted", Algo: module.Incremental},
+ },
+ }
+ chartMergeTreeDataWriterCompressedBytes = module.Chart{
+ ID: "merge_tree_data_writer_compressed_bytes",
+ Title: "Data written to disk for data INSERTed to MergeTree tables",
+ Units: "bytes/s",
+ Fam: "merge tree",
+ Ctx: "clickhouse.merge_tree_data_writer_compressed_bytes",
+ Type: module.Area,
+ Priority: prioMergeTreeDataWriterCompressedBytes,
+ Dims: module.Dims{
+ {ID: "events_MergeTreeDataWriterCompressedBytes", Name: "written", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartUncompressedCacheRequests = module.Chart{
+ ID: "uncompressed_cache_requests",
+ Title: "Uncompressed cache requests",
+ Units: "requests/s",
+ Fam: "cache",
+ Ctx: "clickhouse.uncompressed_cache_requests",
+ Priority: prioUncompressedCacheRequests,
+ Dims: module.Dims{
+ {ID: "events_UncompressedCacheHits", Name: "hits", Algo: module.Incremental},
+ {ID: "events_UncompressedCacheMisses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+ chartMarkCacheRequests = module.Chart{
+ ID: "mark_cache_requests",
+ Title: "Mark cache requests",
+ Units: "requests/s",
+ Fam: "cache",
+ Ctx: "clickhouse.mark_cache_requests",
+ Priority: prioMarkCacheRequests,
+ Dims: module.Dims{
+ {ID: "events_MarkCacheHits", Name: "hits", Algo: module.Incremental},
+ {ID: "events_MarkCacheMisses", Name: "misses", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartMaxPartCountForPartition = module.Chart{
+ ID: "max_part_count_for_partition",
+ Title: "Max part count for partition",
+ Units: "parts",
+ Fam: "parts",
+ Ctx: "clickhouse.max_part_count_for_partition",
+ Priority: prioMaxPartCountForPartition,
+ Dims: module.Dims{
+ {ID: "async_metrics_MaxPartCountForPartition", Name: "max_parts_partition"},
+ },
+ }
+ chartPartsCount = module.Chart{
+ ID: "parts_count",
+ Title: "Parts",
+ Units: "parts",
+ Fam: "parts",
+ Ctx: "clickhouse.parts_count",
+ Priority: prioParts,
+ Dims: module.Dims{
+ {ID: "metrics_PartsTemporary", Name: "temporary"},
+ {ID: "metrics_PartsPreActive", Name: "pre_active"},
+ {ID: "metrics_PartsActive", Name: "active"},
+ {ID: "metrics_PartsDeleting", Name: "deleting"},
+ {ID: "metrics_PartsDeleteOnDestroy", Name: "delete_on_destroy"},
+ {ID: "metrics_PartsOutdated", Name: "outdated"},
+ {ID: "metrics_PartsWide", Name: "wide"},
+ {ID: "metrics_PartsCompact", Name: "compact"},
+ },
+ }
+)
+
+var (
+ chartDistributedConnections = module.Chart{
+ ID: "distributes_connections",
+ Title: "Active distributed connection",
+ Units: "connections",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections",
+ Priority: prioDistributedSend,
+ Dims: module.Dims{
+ {ID: "metrics_DistributedSend", Name: "active"},
+ },
+ }
+ chartDistributedConnectionAttempts = module.Chart{
+ ID: "distributes_connections_attempts",
+ Title: "Distributed connection attempts",
+ Units: "attempts/s",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections_attempts",
+ Priority: prioDistributedConnectionTries,
+ Dims: module.Dims{
+ {ID: "events_DistributedConnectionTries", Name: "connection", Algo: module.Incremental},
+ },
+ }
+ chartDistributedConnectionFailRetries = module.Chart{
+ ID: "distributes_connections_fail_retries",
+ Title: "Distributed connection fails with retry",
+ Units: "fails/s",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections_fail_retries",
+ Priority: prioDistributedConnectionFailTry,
+ Dims: module.Dims{
+ {ID: "events_DistributedConnectionFailTry", Name: "connection_retry", Algo: module.Incremental},
+ },
+ }
+ chartDistributedConnectionFailExhaustedRetries = module.Chart{
+ ID: "distributes_connections_fail_exhausted_retries",
+ Title: "Distributed connection fails after all retries finished",
+ Units: "fails/s",
+ Fam: "distributed conns",
+ Ctx: "clickhouse.distributed_connections_fail_exhausted_retries",
+ Priority: prioDistributedConnectionFailAtAll,
+ Dims: module.Dims{
+ {ID: "events_DistributedConnectionFailAtAll", Name: "connection_retry_exhausted", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartDistributedFilesToInsert = module.Chart{
+ ID: "distributes_files_to_insert",
+ Title: "Pending files to process for asynchronous insertion into Distributed tables",
+ Units: "files",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_files_to_insert",
+ Priority: prioDistributedFilesToInsert,
+ Dims: module.Dims{
+ {ID: "metrics_DistributedFilesToInsert", Name: "pending_insertions"},
+ },
+ }
+ chartDistributedRejectedInserts = module.Chart{
+ ID: "distributes_rejected_inserts",
+ Title: "Rejected INSERTs to a Distributed table",
+ Units: "inserts/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_rejected_inserts",
+ Priority: prioDistributedRejectedInserts,
+ Dims: module.Dims{
+ {ID: "events_DistributedRejectedInserts", Name: "rejected", Algo: module.Incremental},
+ },
+ }
+ chartDistributedDelayedInserts = module.Chart{
+ ID: "distributes_delayed_inserts",
+ Title: "Delayed INSERTs to a Distributed table",
+ Units: "inserts/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_delayed_inserts",
+ Priority: prioDistributedDelayedInserts,
+ Dims: module.Dims{
+ {ID: "events_DistributedDelayedInserts", Name: "delayed", Algo: module.Incremental},
+ },
+ }
+ chartDistributedDelayedInsertsLatency = module.Chart{
+ ID: "distributes_delayed_inserts_latency",
+ Title: "Time spent while the INSERT of a block to a Distributed table was throttled",
+ Units: "milliseconds",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_delayed_inserts_latency",
+ Priority: prioDistributedDelayedInsertsMilliseconds,
+ Dims: module.Dims{
+ {ID: "events_DistributedDelayedInsertsMilliseconds", Name: "delayed_time", Algo: module.Incremental},
+ },
+ }
+ chartDistributedSyncInsertionTimeoutExceeded = module.Chart{
+ ID: "distributes_sync_insertion_timeout_exceeded",
+ Title: "Distributed table sync insertions timeouts",
+ Units: "timeouts/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_sync_insertion_timeout_exceeded",
+ Priority: prioDistributedSyncInsertionTimeoutExceeded,
+ Dims: module.Dims{
+ {ID: "events_DistributedSyncInsertionTimeoutExceeded", Name: "sync_insertion", Algo: module.Incremental},
+ },
+ }
+ chartDistributedAsyncInsertionFailures = module.Chart{
+ ID: "distributes_async_insertions_failures",
+ Title: "Distributed table async insertion failures",
+ Units: "failures/s",
+ Fam: "distributed inserts",
+ Ctx: "clickhouse.distributed_async_insertions_failures",
+ Priority: prioDistributedAsyncInsertionFailures,
+ Dims: module.Dims{
+ {ID: "events_DistributedAsyncInsertionFailures", Name: "async_insertions", Algo: module.Incremental},
+ },
+ }
+)
+
+var (
+ chartUptime = module.Chart{
+ ID: "uptime",
+ Title: "Uptime",
+ Units: "seconds",
+ Fam: "uptime",
+ Ctx: "clickhouse.uptime",
+ Priority: prioUptime,
+ Dims: module.Dims{
+ {ID: "async_metrics_Uptime", Name: "uptime"},
+ },
+ }
+)
+
+func (c *ClickHouse) addDiskCharts(disk *seenDisk) {
+ charts := diskChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, disk.disk)
+ chart.Labels = []module.Label{
+ {Key: "disk_name", Value: disk.disk},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, disk.disk)
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *ClickHouse) removeDiskCharts(disk *seenDisk) {
+ px := fmt.Sprintf("disk_%s_", disk.disk)
+ c.removeCharts(px)
+}
+
+func (c *ClickHouse) addTableCharts(table *seenTable) {
+ charts := tableChartsTmpl.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, table.table, table.db)
+ chart.Labels = []module.Label{
+ {Key: "database", Value: table.db},
+ {Key: "table", Value: table.table},
+ }
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, table.table, table.db)
+ }
+ }
+
+ if err := c.Charts().Add(*charts...); err != nil {
+ c.Warning(err)
+ }
+}
+
+func (c *ClickHouse) removeTableCharts(table *seenTable) {
+ px := fmt.Sprintf("table_%s_database_%s_", table.table, table.db)
+ c.removeCharts(px)
+}
+
+func (c *ClickHouse) removeCharts(prefix string) {
+ for _, chart := range *c.Charts() {
+ if strings.HasPrefix(chart.ID, prefix) {
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+ }
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse.go b/src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse.go
new file mode 100644
index 000000000..21b7f1d3f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse.go
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ _ "embed"
+ "errors"
+ "net/http"
+ "time"
+
+ "github.com/netdata/netdata/go/go.d.plugin/agent/module"
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+//go:embed "config_schema.json"
+var configSchema string
+
+func init() {
+ module.Register("clickhouse", module.Creator{
+ Create: func() module.Module { return New() },
+ Config: func() any { return &Config{} },
+ JobConfigSchema: configSchema,
+ })
+}
+
+func New() *ClickHouse {
+ return &ClickHouse{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8123",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ },
+ },
+ charts: chCharts.Copy(),
+ seenDisks: make(map[string]*seenDisk),
+ seenDbTables: make(map[string]*seenTable),
+ }
+}
+
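+// A minimal job configuration for this collector might look like the sketch
+// below (YAML; the url/timeout defaults match New(), and fields such as
+// username/password come from the embedded web.HTTP options):
+//
+//	jobs:
+//	  - name: local
+//	    url: http://127.0.0.1:8123
+//	    timeout: 1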
+type Config struct {
+ UpdateEvery int `yaml:"update_every,omitempty" json:"update_every"`
+ web.HTTP `yaml:",inline" json:""`
+}
+
+type (
+ ClickHouse struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+
+ seenDisks map[string]*seenDisk
+ seenDbTables map[string]*seenTable
+ }
+ seenDisk struct{ disk string }
+ seenTable struct{ db, table string }
+)
+
+func (c *ClickHouse) Configuration() any {
+ return c.Config
+}
+
+func (c *ClickHouse) Init() error {
+ if err := c.validateConfig(); err != nil {
+ c.Errorf("config validation: %v", err)
+ return err
+ }
+
+ httpClient, err := c.initHTTPClient()
+ if err != nil {
+ c.Errorf("init HTTP client: %v", err)
+ return err
+ }
+ c.httpClient = httpClient
+
+ c.Debugf("using URL %s", c.URL)
+ c.Debugf("using timeout: %s", c.Timeout)
+
+ return nil
+}
+
+func (c *ClickHouse) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
+}
+
+func (c *ClickHouse) Charts() *module.Charts {
+ return c.charts
+}
+
+func (c *ClickHouse) Collect() map[string]int64 {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ }
+
+ if len(mx) == 0 {
+ return nil
+ }
+
+ return mx
+}
+
+func (c *ClickHouse) Cleanup() {
+ if c.httpClient != nil {
+ c.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse_test.go b/src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse_test.go
new file mode 100644
index 000000000..de78bed43
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/clickhouse_test.go
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "os"
+ "testing"
+
+ "github.com/netdata/netdata/go/go.d.plugin/agent/module"
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataRespSystemAsyncMetrics, _ = os.ReadFile("testdata/resp_system_async_metrics.csv")
+ dataRespSystemMetrics, _ = os.ReadFile("testdata/resp_system_metrics.csv")
+ dataRespSystemEvents, _ = os.ReadFile("testdata/resp_system_events.csv")
+ dataRespSystemParts, _ = os.ReadFile("testdata/resp_system_parts.csv")
+ dataRespSystemDisks, _ = os.ReadFile("testdata/resp_system_disks.csv")
+ dataRespLongestQueryTime, _ = os.ReadFile("testdata/resp_longest_query_time.csv")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataRespSystemAsyncMetrics": dataRespSystemAsyncMetrics,
+ "dataRespSystemMetrics": dataRespSystemMetrics,
+ "dataRespSystemEvents": dataRespSystemEvents,
+ "dataRespSystemParts": dataRespSystemParts,
+ "dataRespSystemDisks": dataRespSystemDisks,
+ "dataRespLongestQueryTime": dataRespLongestQueryTime,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestClickhouse_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ClickHouse{}, dataConfigJSON, dataConfigYAML)
+}
+
+func TestClickHouse_Init(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ config Config
+ }{
+ "success with default": {
+ wantFail: false,
+ config: New().Config,
+ },
+ "fail when URL not set": {
+ wantFail: true,
+ config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{URL: ""},
+ },
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ click := New()
+ click.Config = test.config
+
+ if test.wantFail {
+ assert.Error(t, click.Init())
+ } else {
+ assert.NoError(t, click.Init())
+ }
+ })
+ }
+}
+
+func TestClickHouse_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestClickHouse_Check(t *testing.T) {
+ tests := map[string]struct {
+ wantFail bool
+ prepare func(t *testing.T) (*ClickHouse, func())
+ }{
+ "success on valid response": {
+ wantFail: false,
+ prepare: prepareCaseOk,
+ },
+ "fails on unexpected response": {
+ wantFail: true,
+ prepare: prepareCaseUnexpectedResponse,
+ },
+ "fails on connection refused": {
+ wantFail: true,
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ click, cleanup := test.prepare(t)
+ defer cleanup()
+
+ if test.wantFail {
+ assert.Error(t, click.Check())
+ } else {
+ assert.NoError(t, click.Check())
+ }
+ })
+ }
+}
+
+func TestClickHouse_Collect(t *testing.T) {
+ tests := map[string]struct {
+ prepare func(t *testing.T) (*ClickHouse, func())
+ wantMetrics map[string]int64
+ }{
+ "success on valid response": {
+ prepare: prepareCaseOk,
+ wantMetrics: map[string]int64{
+ "LongestRunningQueryTime": 73,
+ "async_metrics_MaxPartCountForPartition": 7,
+ "async_metrics_ReplicasMaxAbsoluteDelay": 0,
+ "async_metrics_Uptime": 64380,
+ "disk_default_free_space_bytes": 165494767616,
+ "disk_default_used_space_bytes": 45184565248,
+ "events_DelayedInserts": 0,
+ "events_DelayedInsertsMilliseconds": 0,
+ "events_DistributedAsyncInsertionFailures": 0,
+ "events_DistributedConnectionFailAtAll": 0,
+ "events_DistributedConnectionFailTry": 0,
+ "events_DistributedConnectionTries": 0,
+ "events_DistributedDelayedInserts": 0,
+ "events_DistributedDelayedInsertsMilliseconds": 0,
+ "events_DistributedRejectedInserts": 0,
+ "events_DistributedSyncInsertionTimeoutExceeded": 0,
+ "events_FailedInsertQuery": 0,
+ "events_FailedQuery": 0,
+ "events_FailedSelectQuery": 0,
+ "events_FileOpen": 1568962,
+ "events_InsertQuery": 0,
+ "events_InsertQueryTimeMicroseconds": 0,
+ "events_InsertedBytes": 0,
+ "events_InsertedRows": 0,
+ "events_MarkCacheHits": 0,
+ "events_MarkCacheMisses": 0,
+ "events_Merge": 0,
+ "events_MergeTreeDataWriterCompressedBytes": 0,
+ "events_MergeTreeDataWriterRows": 0,
+ "events_MergeTreeDataWriterUncompressedBytes": 0,
+ "events_MergedRows": 0,
+ "events_MergedUncompressedBytes": 0,
+ "events_MergesTimeMilliseconds": 0,
+ "events_Query": 0,
+ "events_QueryMemoryLimitExceeded": 0,
+ "events_QueryPreempted": 0,
+ "events_QueryTimeMicroseconds": 0,
+ "events_ReadBackoff": 0,
+ "events_ReadBufferFromFileDescriptorRead": 0,
+ "events_ReadBufferFromFileDescriptorReadBytes": 0,
+ "events_ReadBufferFromFileDescriptorReadFailed": 0,
+ "events_RejectedInserts": 0,
+ "events_ReplicatedDataLoss": 0,
+ "events_ReplicatedPartFailedFetches": 0,
+ "events_ReplicatedPartFetches": 0,
+ "events_ReplicatedPartFetchesOfMerged": 0,
+ "events_ReplicatedPartMerges": 0,
+ "events_Seek": 0,
+ "events_SelectQuery": 0,
+ "events_SelectQueryTimeMicroseconds": 0,
+ "events_SelectedBytes": 0,
+ "events_SelectedMarks": 0,
+ "events_SelectedParts": 0,
+ "events_SelectedRanges": 0,
+ "events_SelectedRows": 0,
+ "events_SlowRead": 0,
+ "events_SuccessfulInsertQuery": 0,
+ "events_SuccessfulQuery": 0,
+ "events_SuccessfulSelectQuery": 0,
+ "events_UncompressedCacheHits": 0,
+ "events_UncompressedCacheMisses": 0,
+ "events_WriteBufferFromFileDescriptorWrite": 0,
+ "events_WriteBufferFromFileDescriptorWriteBytes": 0,
+ "events_WriteBufferFromFileDescriptorWriteFailed": 0,
+ "metrics_DistributedFilesToInsert": 0,
+ "metrics_DistributedSend": 0,
+ "metrics_HTTPConnection": 0,
+ "metrics_InterserverConnection": 0,
+ "metrics_MemoryTracking": 1270999152,
+ "metrics_MySQLConnection": 0,
+ "metrics_PartsActive": 25,
+ "metrics_PartsCompact": 233,
+ "metrics_PartsDeleteOnDestroy": 0,
+ "metrics_PartsDeleting": 0,
+ "metrics_PartsOutdated": 284,
+ "metrics_PartsPreActive": 0,
+ "metrics_PartsTemporary": 0,
+ "metrics_PartsWide": 76,
+ "metrics_PostgreSQLConnection": 0,
+ "metrics_Query": 1,
+ "metrics_QueryPreempted": 0,
+ "metrics_ReadonlyReplica": 0,
+ "metrics_ReplicatedChecks": 0,
+ "metrics_ReplicatedFetch": 0,
+ "metrics_ReplicatedSend": 0,
+ "metrics_TCPConnection": 1,
+ "table_asynchronous_metric_log_database_system_parts": 6,
+ "table_asynchronous_metric_log_database_system_rows": 70377261,
+ "table_asynchronous_metric_log_database_system_size_bytes": 19113663,
+ "table_metric_log_database_system_parts": 6,
+ "table_metric_log_database_system_rows": 162718,
+ "table_metric_log_database_system_size_bytes": 18302533,
+ "table_processors_profile_log_database_system_parts": 5,
+ "table_processors_profile_log_database_system_rows": 20107,
+ "table_processors_profile_log_database_system_size_bytes": 391629,
+ "table_query_log_database_system_parts": 5,
+ "table_query_log_database_system_rows": 761,
+ "table_query_log_database_system_size_bytes": 196403,
+ "table_trace_log_database_system_parts": 8,
+ "table_trace_log_database_system_rows": 1733076,
+ "table_trace_log_database_system_size_bytes": 28695023,
+ },
+ },
+ "fails on unexpected response": {
+ prepare: prepareCaseUnexpectedResponse,
+ },
+ "fails on connection refused": {
+ prepare: prepareCaseConnectionRefused,
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ click, cleanup := test.prepare(t)
+ defer cleanup()
+
+ mx := click.Collect()
+
+ require.Equal(t, test.wantMetrics, mx)
+ if len(test.wantMetrics) > 0 {
+ testMetricsHasAllChartsDims(t, click, mx)
+ }
+ })
+ }
+}
+
+func testMetricsHasAllChartsDims(t *testing.T, click *ClickHouse, mx map[string]int64) {
+ for _, chart := range *click.Charts() {
+ if chart.Obsolete {
+ continue
+ }
+ for _, dim := range chart.Dims {
+ _, ok := mx[dim.ID]
+ assert.Truef(t, ok, "collected metrics have no data for dim '%s' chart '%s'", dim.ID, chart.ID)
+ }
+ }
+}
+
+func prepareCaseOk(t *testing.T) (*ClickHouse, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ switch r.URL.Query().Get("query") {
+ case querySystemEvents:
+ _, _ = w.Write(dataRespSystemEvents)
+ case querySystemMetrics:
+ _, _ = w.Write(dataRespSystemMetrics)
+ case querySystemAsyncMetrics:
+ _, _ = w.Write(dataRespSystemAsyncMetrics)
+ case querySystemParts:
+ _, _ = w.Write(dataRespSystemParts)
+ case querySystemDisks:
+ _, _ = w.Write(dataRespSystemDisks)
+ case queryLongestQueryTime:
+ _, _ = w.Write(dataRespLongestQueryTime)
+ default:
+ w.WriteHeader(http.StatusNotFound)
+ }
+ }))
+
+ click := New()
+ click.URL = srv.URL
+ require.NoError(t, click.Init())
+
+ return click, srv.Close
+}
+
+func prepareCaseUnexpectedResponse(t *testing.T) (*ClickHouse, func()) {
+ t.Helper()
+ srv := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ _, _ = w.Write([]byte("hello and\n goodbye"))
+ }))
+
+ click := New()
+ click.URL = srv.URL
+ require.NoError(t, click.Init())
+
+ return click, srv.Close
+}
+
+func prepareCaseConnectionRefused(t *testing.T) (*ClickHouse, func()) {
+ t.Helper()
+ click := New()
+ click.URL = "http://127.0.0.1:65001/stat"
+ require.NoError(t, click.Init())
+
+ return click, func() {}
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/collect.go b/src/go/collectors/go.d.plugin/modules/clickhouse/collect.go
new file mode 100644
index 000000000..8bb756528
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/collect.go
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "encoding/csv"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "slices"
+)
+
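+// precision scales fractional values (e.g. query elapsed seconds) so they
+// survive the int64 metric map: values are multiplied by it on collection,
+// and the charts divide them back via 'Div: precision'.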
+const precision = 1000
+
+func (c *ClickHouse) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ if err := c.collectSystemEvents(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemMetrics(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemAsyncMetrics(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemParts(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectSystemDisks(mx); err != nil {
+ return nil, err
+ }
+ if err := c.collectLongestRunningQueryTime(mx); err != nil {
+ return nil, err
+ }
+
+ return mx, nil
+}
+
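+// doOKDecodeCSV executes the request, treats any non-200 status as an error,
+// and streams the response body through readCSVResponseData below.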
+func (c *ClickHouse) doOKDecodeCSV(req *http.Request, assign func(column, value string, lineEnd bool)) error {
+ resp, err := c.httpClient.Do(req)
+ if err != nil {
+ return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
+ }
+ defer closeBody(resp)
+
+ if resp.StatusCode != http.StatusOK {
+ return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
+ }
+
+ return readCSVResponseData(resp.Body, assign)
+}
+
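+// readCSVResponseData treats the first CSV record as the header and invokes
+// assign once per field of every data record. For example, a CSVWithNames
+// response such as
+//
+//	"metric","value"
+//	"Uptime","64380"
+//
+// yields assign("metric", "Uptime", false) followed by
+// assign("value", "64380", true) for the data row.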
+func readCSVResponseData(reader io.Reader, assign func(column, value string, lineEnd bool)) error {
+ r := csv.NewReader(reader)
+ r.ReuseRecord = true
+
+ var columns []string
+
+ for {
+ record, err := r.Read()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+
+ if len(columns) == 0 {
+ columns = slices.Clone(record)
+ continue
+ }
+
+ if len(columns) != len(record) {
+ return fmt.Errorf("column count mismatch: %d vs %d", len(columns), len(record))
+ }
+
+ for i, l := 0, len(record); i < l; i++ {
+ assign(columns[i], record[i], i == l-1)
+ }
+ }
+
+ return nil
+}
+
+func makeURLQuery(q string) string {
+ return url.Values{"query": {q}}.Encode()
+}
+
+func closeBody(resp *http.Response) {
+ if resp != nil && resp.Body != nil {
+ _, _ = io.Copy(io.Discard, resp.Body)
+ _ = resp.Body.Close()
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_async_metrics.go b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_async_metrics.go
new file mode 100644
index 000000000..46b8fed49
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_async_metrics.go
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+const querySystemAsyncMetrics = `
+SELECT
+ metric,
+ value
+FROM
+ system.asynchronous_metrics
+WHERE
+ metric LIKE 'Uptime'
+ OR metric LIKE 'MaxPartCountForPartition'
+ OR metric LIKE 'ReplicasMaxAbsoluteDelay' FORMAT CSVWithNames
+`
+
+func (c *ClickHouse) collectSystemAsyncMetrics(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemAsyncMetrics)
+
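+ // want maps each async metric to a scale multiplier: ReplicasMaxAbsoluteDelay
+ // is multiplied by precision to keep its fractional seconds (its chart divides
+ // the value back via 'Div: precision').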
+ want := map[string]float64{
+ "Uptime": 1,
+ "MaxPartCountForPartition": 1,
+ "ReplicasMaxAbsoluteDelay": precision,
+ }
+
+ px := "async_metrics_"
+ var metric string
+ var n int
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "metric":
+ metric = value
+ case "value":
+ mul, ok := want[metric]
+ if !ok {
+ return
+ }
+ n++
+ if v, err := strconv.ParseFloat(value, 64); err == nil {
+ mx[px+metric] = int64(v * mul)
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return errors.New("no system async metrics data returned")
+ }
+
+ return nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_disks.go b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_disks.go
new file mode 100644
index 000000000..7e1dbb8d0
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_disks.go
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "strconv"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+const querySystemDisks = `
+SELECT
+ name,
+ sum(free_space) as free_space,
+ sum(total_space) as total_space
+FROM
+ system.disks
+GROUP BY
+ name FORMAT CSVWithNames
+`
+
+type diskStats struct {
+ name string
+ totalBytes int64
+ freeBytes int64
+}
+
+func (c *ClickHouse) collectSystemDisks(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemDisks)
+
+ seen := make(map[string]*diskStats)
+
+ getDisk := func(name string) *diskStats {
+ s, ok := seen[name]
+ if !ok {
+ s = &diskStats{name: name}
+ seen[name] = s
+ }
+ return s
+ }
+
+ var name string
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "name":
+ name = value
+ case "free_space":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getDisk(name).freeBytes = v
+ case "total_space":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getDisk(name).totalBytes = v
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, disk := range seen {
+ if _, ok := c.seenDisks[disk.name]; !ok {
+ v := &seenDisk{disk: disk.name}
+ c.seenDisks[disk.name] = v
+ c.addDiskCharts(v)
+ }
+
+ px := "disk_" + disk.name + "_"
+
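+ // system.disks exposes free and total space; used is derived as total - free.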
+ mx[px+"free_space_bytes"] = disk.freeBytes
+ mx[px+"used_space_bytes"] = disk.totalBytes - disk.freeBytes
+ }
+
+ for k, v := range c.seenDisks {
+ if _, ok := seen[k]; !ok {
+ delete(c.seenDisks, k)
+ c.removeDiskCharts(v)
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_events.go b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_events.go
new file mode 100644
index 000000000..94d996162
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_events.go
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+const querySystemEvents = `
+SELECT
+ event,
+ value
+FROM
+ system.events FORMAT CSVWithNames
+`
+
+func (c *ClickHouse) collectSystemEvents(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemEvents)
+
+ px := "events_"
+ var event string
+ var n int
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "event":
+ event = value
+ case "value":
+ if !wantSystemEvents[event] {
+ return
+ }
+ n++
+ if v, err := strconv.ParseInt(value, 10, 64); err == nil {
+ mx[px+event] = v
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return errors.New("no system events data returned")
+ }
+
+ // CH doesn't expose events with 0 values
+ for k := range wantSystemEvents {
+ k = px + k
+ if _, ok := mx[k]; !ok {
+ mx[k] = 0
+ }
+ }
+
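+ // system.events exposes only total and failed counters; successful counts
+ // are derived as total - failed for the per-status query charts.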
+ mx["events_SuccessfulQuery"] = mx["events_Query"] - mx["events_FailedQuery"]
+ mx["events_SuccessfulSelectQuery"] = mx["events_SelectQuery"] - mx["events_FailedSelectQuery"]
+ mx["events_SuccessfulInsertQuery"] = mx["events_InsertQuery"] - mx["events_FailedInsertQuery"]
+
+ return nil
+}
+
+var wantSystemEvents = map[string]bool{
+ "SlowRead": true,
+ "ReadBackoff": true,
+ "Query": true,
+ "FailedQuery": true,
+ "QueryTimeMicroseconds": true,
+ "SelectQuery": true,
+ "FailedSelectQuery": true,
+ "SelectQueryTimeMicroseconds": true,
+ "InsertQuery": true,
+ "FailedInsertQuery": true,
+ "InsertQueryTimeMicroseconds": true,
+ "QueryPreempted": true,
+ "QueryMemoryLimitExceeded": true,
+ "InsertedRows": true,
+ "InsertedBytes": true,
+ "DelayedInserts": true,
+ "DelayedInsertsMilliseconds": true,
+ "RejectedInserts": true,
+ "SelectedRows": true,
+ "SelectedBytes": true,
+ "SelectedParts": true,
+ "SelectedRanges": true,
+ "SelectedMarks": true,
+ "Merge": true,
+ "MergedRows": true,
+ "MergedUncompressedBytes": true,
+ "MergesTimeMilliseconds": true,
+ "MergeTreeDataWriterRows": true,
+ "MergeTreeDataWriterUncompressedBytes": true,
+ "MergeTreeDataWriterCompressedBytes": true,
+ "UncompressedCacheHits": true,
+ "UncompressedCacheMisses": true,
+ "MarkCacheHits": true,
+ "MarkCacheMisses": true,
+ "Seek": true,
+ "FileOpen": true,
+ "ReadBufferFromFileDescriptorReadBytes": true,
+ "WriteBufferFromFileDescriptorWriteBytes": true,
+ "ReadBufferFromFileDescriptorRead": true,
+ "WriteBufferFromFileDescriptorWrite": true,
+ "ReadBufferFromFileDescriptorReadFailed": true,
+ "WriteBufferFromFileDescriptorWriteFailed": true,
+ "DistributedConnectionTries": true,
+ "DistributedConnectionFailTry": true,
+ "DistributedConnectionFailAtAll": true,
+ "DistributedRejectedInserts": true,
+ "DistributedDelayedInserts": true,
+ "DistributedDelayedInsertsMilliseconds": true,
+ "DistributedSyncInsertionTimeoutExceeded": true,
+ "DistributedAsyncInsertionFailures": true,
+ "ReplicatedDataLoss": true,
+ "ReplicatedPartFetches": true,
+ "ReplicatedPartFailedFetches": true,
+ "ReplicatedPartMerges": true,
+ "ReplicatedPartFetchesOfMerged": true,
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_metrics.go b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_metrics.go
new file mode 100644
index 000000000..f7c3981c8
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_metrics.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "strconv"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+const querySystemMetrics = `
+SELECT
+ metric,
+ value
+FROM
+ system.metrics FORMAT CSVWithNames
+`
+
+func (c *ClickHouse) collectSystemMetrics(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemMetrics)
+
+ px := "metrics_"
+ var metric string
+ var n int
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "metric":
+ metric = value
+ case "value":
+ if !wantSystemMetrics[metric] {
+ return
+ }
+ n++
+ if v, err := strconv.ParseInt(value, 10, 64); err == nil {
+ mx[px+metric] = v
+ }
+ }
+ })
+ if err != nil {
+ return err
+ }
+ if n == 0 {
+ return errors.New("no system metrics data returned")
+ }
+
+ return nil
+}
+
+var wantSystemMetrics = map[string]bool{
+ "Query": true,
+ "TCPConnection": true,
+ "HTTPConnection": true,
+ "MySQLConnection": true,
+ "PostgreSQLConnection": true,
+ "InterserverConnection": true,
+ "MemoryTracking": true,
+ "QueryPreempted": true,
+ "ReplicatedFetch": true,
+ "ReplicatedSend": true,
+ "ReplicatedChecks": true,
+ "ReadonlyReplica": true,
+ "PartsTemporary": true,
+ "PartsPreActive": true,
+ "PartsActive": true,
+ "PartsDeleting": true,
+ "PartsDeleteOnDestroy": true,
+ "PartsOutdated": true,
+ "PartsWide": true,
+ "PartsCompact": true,
+ "DistributedSend": true,
+ "DistributedFilesToInsert": true,
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_parts.go b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_parts.go
new file mode 100644
index 000000000..08ffd602e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_parts.go
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+const querySystemParts = `
+SELECT
+ database,
+ table,
+ sum(bytes) as bytes,
+ count() as parts,
+ sum(rows) as rows
+FROM
+ system.parts
+WHERE
+ active = 1
+GROUP BY
+ database,
+ table FORMAT CSVWithNames
+`
+
+type tableStats struct {
+ database string
+ table string
+ bytes int64
+ parts int64
+ rows int64
+}
+
+func (c *ClickHouse) collectSystemParts(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(querySystemParts)
+
+ seen := make(map[string]*tableStats)
+
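+ // tables are keyed by the concatenation of table and database names,
+ // matching the keys used for c.seenDbTables below.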
+ getTable := func(db, table string) *tableStats {
+ k := table + db
+ s, ok := seen[k]
+ if !ok {
+ s = &tableStats{database: db, table: table}
+ seen[k] = s
+ }
+ return s
+ }
+
+ var database, table string
+
+ err := c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ switch column {
+ case "database":
+ database = value
+ case "table":
+ table = value
+ case "bytes":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getTable(database, table).bytes = v
+ case "parts":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getTable(database, table).parts = v
+ case "rows":
+ v, _ := strconv.ParseInt(value, 10, 64)
+ getTable(database, table).rows = v
+ }
+ })
+ if err != nil {
+ return err
+ }
+
+ for _, table := range seen {
+ k := table.table + table.database
+ if _, ok := c.seenDbTables[k]; !ok {
+ v := &seenTable{db: table.database, table: table.table}
+ c.seenDbTables[k] = v
+ c.addTableCharts(v)
+ }
+
+ px := fmt.Sprintf("table_%s_database_%s_", table.table, table.database)
+
+ mx[px+"size_bytes"] = table.bytes
+ mx[px+"parts"] = table.parts
+ mx[px+"rows"] = table.rows
+ }
+
+ for k, v := range c.seenDbTables {
+ if _, ok := seen[k]; !ok {
+ delete(c.seenDbTables, k)
+ c.removeTableCharts(v)
+ }
+ }
+
+ return nil
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_processes.go b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_processes.go
new file mode 100644
index 000000000..d31103a8f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/collect_system_processes.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "strconv"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+const queryLongestQueryTime = `
+SELECT
+ toString(max(elapsed)) as value
+FROM
+ system.processes FORMAT CSVWithNames
+`
+
+func (c *ClickHouse) collectLongestRunningQueryTime(mx map[string]int64) error {
+ req, _ := web.NewHTTPRequest(c.Request)
+ req.URL.RawQuery = makeURLQuery(queryLongestQueryTime)
+
+ return c.doOKDecodeCSV(req, func(column, value string, lineEnd bool) {
+ if column == "value" {
+			// elapsed is float seconds; store it as a scaled fixed-point integer
+			if v, err := strconv.ParseFloat(value, 64); err == nil {
+ mx["LongestRunningQueryTime"] = int64(v * precision)
+ }
+ }
+ })
+}
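The metrics map holds only `int64` values, so the float seconds reported by `system.processes` are stored as fixed-point integers scaled by the module's `precision` constant (defined elsewhere in this diff; the value 1000 below is an assumption for illustration). The chart definition divides by the same factor when rendering:

```go
package main

import "fmt"

// assumed scale factor; the module defines its own precision constant
const precision = 1000

func main() {
	elapsed := 0.0738 // seconds, as returned by the system.processes query
	stored := int64(elapsed * precision)

	fmt.Println(stored)                      // 73 -> what goes into the mx map
	fmt.Println(float64(stored) / precision) // 0.073 -> what the chart shows after dividing back
}
```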
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/config_schema.json b/src/go/collectors/go.d.plugin/modules/clickhouse/config_schema.json
new file mode 100644
index 000000000..e8b0ed2be
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/config_schema.json
@@ -0,0 +1,177 @@
+{
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ClickHouse collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "Data collection interval, measured in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the ClickHouse server.",
+ "type": "string",
+ "default": "http://127.0.0.1:8123",
+ "format": "uri"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server.",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication.",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": [
+ "object",
+ "null"
+ ],
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "pattern": "^$|^/"
+ },
+ "body": {
+ "title": "Body",
+ "type": "string"
+ },
+ "method": {
+ "title": "Method",
+ "type": "string"
+ }
+ },
+ "required": [
+ "url"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^name$": {}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "body": {
+ "ui:widget": "hidden"
+ },
+ "method": {
+ "ui:widget": "hidden"
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
+ }
+ }
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/init.go b/src/go/collectors/go.d.plugin/modules/clickhouse/init.go
new file mode 100644
index 000000000..c8db54e40
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/init.go
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package clickhouse
+
+import (
+ "errors"
+ "net/http"
+
+ "github.com/netdata/netdata/go/go.d.plugin/pkg/web"
+)
+
+func (c *ClickHouse) validateConfig() error {
+ if c.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (c *ClickHouse) initHTTPClient() (*http.Client, error) {
+ return web.NewHTTPClient(c.Client)
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/integrations/clickhouse.md b/src/go/collectors/go.d.plugin/modules/clickhouse/integrations/clickhouse.md
new file mode 100644
index 000000000..27c0396d2
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/integrations/clickhouse.md
@@ -0,0 +1,333 @@
+<!--startmeta
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/go/collectors/go.d.plugin/modules/clickhouse/README.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/go/collectors/go.d.plugin/modules/clickhouse/metadata.yaml"
+sidebar_label: "ClickHouse"
+learn_status: "Published"
+learn_rel_path: "Collecting Metrics/Databases"
+most_popular: False
+message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
+endmeta-->
+
+# ClickHouse
+
+
+<img src="https://netdata.cloud/img/clickhouse.svg" width="150"/>
+
+
+Plugin: go.d.plugin
+Module: clickhouse
+
+<img src="https://img.shields.io/badge/maintained%20by-Netdata-%2300ab44" />
+
+## Overview
+
+This collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. It monitors your ClickHouse server's health and activity.
+
+
+It sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.
+Specifically, it collects metrics from the following tables:
+
+- system.metrics
+- system.async_metrics
+- system.events
+- system.disks
+- system.parts
+- system.processes
+
+
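+You can reproduce one of these requests manually to check that the HTTP interface is reachable. The host, port, and query below are only an example; adjust them to your setup:
+
+```bash
+curl 'http://127.0.0.1:8123/?query=SELECT%20metric,%20value%20FROM%20system.metrics%20FORMAT%20CSVWithNames'
+```
+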
+This collector is supported on all platforms.
+
+This collector supports collecting metrics from multiple instances of this integration, including remote instances.
+
+
+### Default Behavior
+
+#### Auto-Detection
+
+By default, it detects ClickHouse instances running on localhost that are listening on port 8123.
+On startup, it tries to collect metrics from:
+
+- http://127.0.0.1:8123
+
+
+#### Limits
+
+The default configuration for this integration does not impose any limits on data collection.
+
+#### Performance Impact
+
+The default configuration for this integration is not expected to impose a significant performance impact on the system.
+
+
+## Metrics
+
+Metrics grouped by *scope*.
+
+The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
+
+
+
+### Per ClickHouse instance
+
+These metrics refer to the entire monitored application.
+
+This scope has no labels.
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| clickhouse.connections | tcp, http, mysql, postgresql, interserver | connections |
+| clickhouse.slow_reads | slow | reads/s |
+| clickhouse.read_backoff | read_backoff | events/s |
+| clickhouse.memory_usage | used | bytes |
+| clickhouse.running_queries | running | queries |
+| clickhouse.queries_preempted | preempted | queries |
+| clickhouse.queries | successful, failed | queries/s |
+| clickhouse.select_queries | successful, failed | selects/s |
+| clickhouse.insert_queries | successful, failed | inserts/s |
+| clickhouse.queries_memory_limit_exceeded | mem_limit_exceeded | queries/s |
+| clickhouse.longest_running_query_time | longest_query_time | seconds |
+| clickhouse.queries_latency | queries_time | microseconds |
+| clickhouse.select_queries_latency | selects_time | microseconds |
+| clickhouse.insert_queries_latency | inserts_time | microseconds |
+| clickhouse.io | reads, writes | bytes/s |
+| clickhouse.iops | reads, writes | ops/s |
+| clickhouse.io_errors | read, write | errors/s |
+| clickhouse.io_seeks | lseek | ops/s |
+| clickhouse.io_file_opens | file_open | ops/s |
+| clickhouse.replicated_parts_current_activity | fetch, send, check | parts |
+| clickhouse.replicas_max_absolute_delay | replication_delay | seconds |
+| clickhouse.replicated_readonly_tables | read_only | tables |
+| clickhouse.replicated_data_loss | data_loss | events |
+| clickhouse.replicated_part_fetches | successful, failed | fetches/s |
+| clickhouse.inserted_rows | inserted | rows/s |
+| clickhouse.inserted_bytes | inserted | bytes/s |
+| clickhouse.rejected_inserts | rejected | inserts/s |
+| clickhouse.delayed_inserts | delayed | inserts/s |
+| clickhouse.delayed_inserts_throttle_time | delayed_inserts_throttle_time | milliseconds |
+| clickhouse.selected_bytes | selected | bytes/s |
+| clickhouse.selected_rows | selected | rows/s |
+| clickhouse.selected_parts | selected | parts/s |
+| clickhouse.selected_ranges | selected | ranges/s |
+| clickhouse.selected_marks | selected | marks/s |
+| clickhouse.merges | merge | ops/s |
+| clickhouse.merges_latency | merges_time | milliseconds |
+| clickhouse.merged_uncompressed_bytes | merged_uncompressed | bytes/s |
+| clickhouse.merged_rows | merged | rows/s |
+| clickhouse.merge_tree_data_writer_inserted_rows | inserted | rows/s |
+| clickhouse.merge_tree_data_writer_uncompressed_bytes | inserted | bytes/s |
+| clickhouse.merge_tree_data_writer_compressed_bytes | written | bytes/s |
+| clickhouse.uncompressed_cache_requests | hits, misses | requests/s |
+| clickhouse.mark_cache_requests | hits, misses | requests/s |
+| clickhouse.max_part_count_for_partition | max_parts_partition | parts |
+| clickhouse.parts_count | temporary, pre_active, active, deleting, delete_on_destroy, outdated, wide, compact | parts |
+| clickhouse.distributed_connections | active | connections |
+| clickhouse.distributed_connections_attempts | connection | attempts/s |
+| clickhouse.distributed_connections_fail_retries | connection_retry | fails/s |
+| clickhouse.distributed_connections_fail_exhausted_retries | connection_retry_exhausted | fails/s |
+| clickhouse.distributed_files_to_insert | pending_insertions | files |
+| clickhouse.distributed_rejected_inserts | rejected | inserts/s |
+| clickhouse.distributed_delayed_inserts | delayed | inserts/s |
+| clickhouse.distributed_delayed_inserts_latency | delayed_time | milliseconds |
+| clickhouse.distributed_sync_insertion_timeout_exceeded | sync_insertion | timeouts/s |
+| clickhouse.distributed_async_insertions_failures | async_insertions | failures/s |
+| clickhouse.uptime | uptime | seconds |
+
+### Per disk
+
+These metrics refer to the Disk.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| disk_name | Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure). |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| clickhouse.disk_space_usage | free, used | bytes |
+
+### Per table
+
+These metrics refer to the Database Table.
+
+Labels:
+
+| Label | Description |
+|:-----------|:----------------|
+| database | Name of the database. |
+| table | Name of the table. |
+
+Metrics:
+
+| Metric | Dimensions | Unit |
+|:------|:----------|:----|
+| clickhouse.database_table_size | size | bytes |
+| clickhouse.database_table_parts | parts | parts |
+| clickhouse.database_table_rows | rows | rows |
+
+
+
+## Alerts
+
+
+The following alerts are available:
+
+| Alert name | On metric | Description |
+|:------------|:----------|:------------|
+| [ clickhouse_restarted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.uptime | ClickHouse has recently been restarted |
+| [ clickhouse_queries_preempted ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.queries_preempted | ClickHouse has queries that are stopped and waiting due to priority setting |
+| [ clickhouse_long_running_query ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.longest_running_query_time | ClickHouse has a long-running query exceeding the threshold |
+| [ clickhouse_rejected_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.rejected_inserts | ClickHouse has INSERT queries that are rejected due to a high number of active data parts for a partition in a MergeTree table |
+| [ clickhouse_delayed_inserts ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.delayed_inserts | ClickHouse has INSERT queries that are throttled due to a high number of active data parts for a partition in a MergeTree table |
+| [ clickhouse_replication_lag ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicas_max_absolute_delay | ClickHouse is experiencing replication lag greater than 5 minutes |
+| [ clickhouse_replicated_readonly_tables ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.replicated_readonly_tables | ClickHouse has replicated tables in readonly state due to ZooKeeper session loss or startup without ZooKeeper configured |
+| [ clickhouse_max_part_count_for_partition ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.max_part_count_for_partition | ClickHouse has a high number of parts per partition |
+| [ clickhouse_distributed_connections_failures ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_connections_fail_exhausted_retries | ClickHouse has failed distributed connections after exhausting all retry attempts |
+| [ clickhouse_distributed_files_to_insert ](https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf) | clickhouse.distributed_files_to_insert | ClickHouse has a high number of pending files to process for asynchronous insertion into Distributed tables |
+
+
+## Setup
+
+### Prerequisites
+
+No action required.
+
+### Configuration
+
+#### File
+
+The configuration file name for this integration is `go.d/clickhouse.conf`.
+
+
+You can edit the configuration file using the `edit-config` script from the
+Netdata [config directory](/docs/netdata-agent/configuration/README.md#the-netdata-config-directory).
+
+```bash
+cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
+sudo ./edit-config go.d/clickhouse.conf
+```
+#### Options
+
+The following options can be defined globally: update_every, autodetection_retry.
+
+
+<details open><summary>Config options</summary>
+
+| Name | Description | Default | Required |
+|:----|:-----------|:-------|:--------:|
+| update_every | Data collection frequency. | 1 | no |
+| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
+| url | Server URL. | http://127.0.0.1:8123 | yes |
+| timeout | HTTP request timeout. | 1 | no |
+| username | Username for basic HTTP authentication. | | no |
+| password | Password for basic HTTP authentication. | | no |
+| proxy_url | Proxy URL. | | no |
+| proxy_username | Username for proxy basic HTTP authentication. | | no |
+| proxy_password | Password for proxy basic HTTP authentication. | | no |
+| method | HTTP request method. | GET | no |
+| body | HTTP request body. | | no |
+| headers | HTTP request headers. | | no |
+| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
+| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
+| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
+| tls_cert | Client TLS certificate. | | no |
+| tls_key | Client TLS key. | | no |
+
+</details>
+
+#### Examples
+
+##### Basic
+
+A basic example configuration.
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+
+```
+##### HTTP authentication
+
+Basic HTTP authentication.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+ username: username
+ password: password
+
+```
+</details>
+
+##### HTTPS with self-signed certificate
+
+ClickHouse with enabled HTTPS and self-signed certificate.
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: https://127.0.0.1:8123
+ tls_skip_verify: yes
+
+```
+</details>
+
+##### Multi-instance
+
+> **Note**: When you define multiple jobs, their names must be unique.
+
+Collecting metrics from local and remote instances.
+
+
+<details open><summary>Config</summary>
+
+```yaml
+jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+
+ - name: remote
+ url: http://192.0.2.1:8123
+
+```
+</details>
+
+
+
+## Troubleshooting
+
+### Debug Mode
+
+To troubleshoot issues with the `clickhouse` collector, run the `go.d.plugin` with the debug option enabled. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `go.d.plugin` to debug the collector:
+
+ ```bash
+ ./go.d.plugin -d -m clickhouse
+ ```
+
+
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/metadata.yaml b/src/go/collectors/go.d.plugin/modules/clickhouse/metadata.yaml
new file mode 100644
index 000000000..e9a6b9152
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/metadata.yaml
@@ -0,0 +1,624 @@
+plugin_name: go.d.plugin
+modules:
+ - meta:
+ id: collector-go.d.plugin-clickhouse
+ plugin_name: go.d.plugin
+ module_name: clickhouse
+ monitored_instance:
+ name: ClickHouse
+ link: https://clickhouse.com/
+ icon_filename: clickhouse.svg
+ categories:
+ - data-collection.database-servers
+ keywords:
+ - database
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: |
+ This collector retrieves performance data from ClickHouse for connections, queries, resources, replication, IO, and data operations (inserts, selects, merges) using HTTP requests and ClickHouse system tables. It monitors your ClickHouse server's health and activity.
+ method_description: |
+ It sends HTTP requests to the ClickHouse [HTTP interface](https://clickhouse.com/docs/en/interfaces/http), executing SELECT queries to retrieve data from various system tables.
+ Specifically, it collects metrics from the following tables:
+
+ - system.metrics
+ - system.async_metrics
+ - system.events
+ - system.disks
+ - system.parts
+ - system.processes
+ supported_platforms:
+ include: []
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: |
+ By default, it detects ClickHouse instances running on localhost that are listening on port 8123.
+ On startup, it tries to collect metrics from:
+
+ - http://127.0.0.1:8123
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list: []
+ configuration:
+ file:
+ name: go.d/clickhouse.conf
+ options:
+ description: |
+ The following options can be defined globally: update_every, autodetection_retry.
+ folding:
+ title: Config options
+ enabled: true
+ list:
+ - name: update_every
+ description: Data collection frequency.
+ default_value: 1
+ required: false
+ - name: autodetection_retry
+ description: Recheck interval in seconds. Zero means no recheck will be scheduled.
+ default_value: 0
+ required: false
+ - name: url
+ description: Server URL.
+ default_value: http://127.0.0.1:8123
+ required: true
+ - name: timeout
+ description: HTTP request timeout.
+ default_value: 1
+ required: false
+ - name: username
+ description: Username for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: password
+ description: Password for basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_url
+ description: Proxy URL.
+ default_value: ""
+ required: false
+ - name: proxy_username
+ description: Username for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: proxy_password
+ description: Password for proxy basic HTTP authentication.
+ default_value: ""
+ required: false
+ - name: method
+ description: HTTP request method.
+ default_value: "GET"
+ required: false
+ - name: body
+ description: HTTP request body.
+ default_value: ""
+ required: false
+ - name: headers
+ description: HTTP request headers.
+ default_value: ""
+ required: false
+ - name: not_follow_redirects
+ description: Redirect handling policy. Controls whether the client follows redirects.
+ default_value: no
+ required: false
+ - name: tls_skip_verify
+ description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
+ default_value: no
+ required: false
+ - name: tls_ca
+ description: Certification authority that the client uses when verifying the server's certificates.
+ default_value: ""
+ required: false
+ - name: tls_cert
+ description: Client TLS certificate.
+ default_value: ""
+ required: false
+ - name: tls_key
+ description: Client TLS key.
+ default_value: ""
+ required: false
+ examples:
+ folding:
+ title: Config
+ enabled: true
+ list:
+ - name: Basic
+ folding:
+ enabled: false
+ description: A basic example configuration.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+ - name: HTTP authentication
+ description: Basic HTTP authentication.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+ username: username
+ password: password
+ - name: HTTPS with self-signed certificate
+ description: ClickHouse with enabled HTTPS and self-signed certificate.
+ config: |
+ jobs:
+ - name: local
+ url: https://127.0.0.1:8123
+ tls_skip_verify: yes
+ - name: Multi-instance
+ description: |
+ > **Note**: When you define multiple jobs, their names must be unique.
+
+ Collecting metrics from local and remote instances.
+ config: |
+ jobs:
+ - name: local
+ url: http://127.0.0.1:8123
+
+ - name: remote
+ url: http://192.0.2.1:8123
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: clickhouse_restarted
+ metric: clickhouse.uptime
+ info: ClickHouse has recently been restarted
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_queries_preempted
+ metric: clickhouse.queries_preempted
+ info: ClickHouse has queries that are stopped and waiting due to priority setting
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_long_running_query
+ metric: clickhouse.longest_running_query_time
+ info: ClickHouse has a long-running query exceeding the threshold
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_rejected_inserts
+ metric: clickhouse.rejected_inserts
+        info: ClickHouse has INSERT queries that are rejected due to a high number of active data parts for a partition in a MergeTree table
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_delayed_inserts
+ metric: clickhouse.delayed_inserts
+        info: ClickHouse has INSERT queries that are throttled due to a high number of active data parts for a partition in a MergeTree table
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_replication_lag
+ metric: clickhouse.replicas_max_absolute_delay
+ info: ClickHouse is experiencing replication lag greater than 5 minutes
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_replicated_readonly_tables
+ metric: clickhouse.replicated_readonly_tables
+        info: ClickHouse has replicated tables in readonly state due to ZooKeeper session loss or startup without ZooKeeper configured
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_max_part_count_for_partition
+ metric: clickhouse.max_part_count_for_partition
+        info: ClickHouse has a high number of parts per partition
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_distributed_connections_failures
+ metric: clickhouse.distributed_connections_fail_exhausted_retries
+ info: ClickHouse has failed distributed connections after exhausting all retry attempts
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ - name: clickhouse_distributed_files_to_insert
+ metric: clickhouse.distributed_files_to_insert
+        info: ClickHouse has a high number of pending files to process for asynchronous insertion into Distributed tables
+ link: https://github.com/netdata/netdata/blob/master/src/health/health.d/clickhouse.conf
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: These metrics refer to the entire monitored application.
+ labels: []
+ metrics:
+ - name: clickhouse.connections
+ description: Connections
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: tcp
+ - name: http
+ - name: mysql
+ - name: postgresql
+ - name: interserver
+ - name: clickhouse.slow_reads
+ description: Slow reads from a file
+ unit: reads/s
+ chart_type: line
+ dimensions:
+ - name: slow
+ - name: clickhouse.read_backoff
+ description: Read backoff events
+ unit: events/s
+ chart_type: line
+ dimensions:
+ - name: read_backoff
+ - name: clickhouse.memory_usage
+ description: Memory usage
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: used
+ - name: clickhouse.running_queries
+ description: Running queries
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: running
+ - name: clickhouse.queries_preempted
+ description: Queries waiting due to priority
+ unit: queries
+ chart_type: line
+ dimensions:
+ - name: preempted
+ - name: clickhouse.queries
+ description: Queries
+ unit: queries/s
+ chart_type: stacked
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.select_queries
+ description: Select queries
+ unit: selects/s
+ chart_type: stacked
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.insert_queries
+ description: Insert queries
+ unit: inserts/s
+ chart_type: stacked
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.queries_memory_limit_exceeded
+ description: Memory limit exceeded for query
+ unit: queries/s
+ chart_type: line
+ dimensions:
+ - name: mem_limit_exceeded
+ - name: clickhouse.longest_running_query_time
+ description: Longest running query time
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: longest_query_time
+ - name: clickhouse.queries_latency
+ description: Queries latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: queries_time
+ - name: clickhouse.select_queries_latency
+ description: Select queries latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: selects_time
+ - name: clickhouse.insert_queries_latency
+ description: Insert queries latency
+ unit: microseconds
+ chart_type: line
+ dimensions:
+ - name: inserts_time
+ - name: clickhouse.io
+ description: Read and written data
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: clickhouse.iops
+ description: Read and write operations
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: reads
+ - name: writes
+ - name: clickhouse.io_errors
+ description: Read and write errors
+ unit: errors/s
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: clickhouse.io_seeks
+ description: lseek function calls
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: lseek
+ - name: clickhouse.io_file_opens
+ description: File opens
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: file_open
+ - name: clickhouse.replicated_parts_current_activity
+ description: Replicated parts current activity
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: fetch
+ - name: send
+ - name: check
+            - name: clickhouse.replicas_max_absolute_delay
+ description: Replicas max absolute delay
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: replication_delay
+ - name: clickhouse.replicated_readonly_tables
+ description: Replicated tables in readonly state
+ unit: tables
+ chart_type: line
+ dimensions:
+ - name: read_only
+ - name: clickhouse.replicated_data_loss
+ description: Replicated data loss
+ unit: events
+ chart_type: line
+ dimensions:
+ - name: data_loss
+ - name: clickhouse.replicated_part_fetches
+ description: Replicated part fetches
+ unit: fetches/s
+ chart_type: line
+ dimensions:
+ - name: successful
+ - name: failed
+ - name: clickhouse.inserted_rows
+ description: Inserted rows
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: clickhouse.inserted_bytes
+ description: Inserted data
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: inserted
+ - name: clickhouse.rejected_inserts
+ description: Rejected inserts
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: rejected
+ - name: clickhouse.delayed_inserts
+ description: Delayed inserts
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: delayed
+ - name: clickhouse.delayed_inserts_throttle_time
+ description: Delayed inserts throttle time
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: delayed_inserts_throttle_time
+ - name: clickhouse.selected_bytes
+ description: Selected data
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_rows
+ description: Selected rows
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_parts
+ description: Selected parts
+ unit: parts/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_ranges
+ description: Selected ranges
+ unit: ranges/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.selected_marks
+ description: Selected marks
+ unit: marks/s
+ chart_type: line
+ dimensions:
+ - name: selected
+ - name: clickhouse.merges
+ description: Merge operations
+ unit: ops/s
+ chart_type: line
+ dimensions:
+ - name: merge
+ - name: clickhouse.merges_latency
+ description: Time spent for background merges
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: merges_time
+ - name: clickhouse.merged_uncompressed_bytes
+ description: Uncompressed data read for background merges
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: merged_uncompressed
+ - name: clickhouse.merged_rows
+ description: Merged rows
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: merged
+ - name: clickhouse.merge_tree_data_writer_inserted_rows
+ description: Rows INSERTed to MergeTree tables
+ unit: rows/s
+ chart_type: line
+ dimensions:
+ - name: inserted
+ - name: clickhouse.merge_tree_data_writer_uncompressed_bytes
+ description: Data INSERTed to MergeTree tables
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: inserted
+ - name: clickhouse.merge_tree_data_writer_compressed_bytes
+ description: Data written to disk for data INSERTed to MergeTree tables
+ unit: bytes/s
+ chart_type: area
+ dimensions:
+ - name: written
+ - name: clickhouse.uncompressed_cache_requests
+ description: Uncompressed cache requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: clickhouse.mark_cache_requests
+ description: Mark cache requests
+ unit: requests/s
+ chart_type: line
+ dimensions:
+ - name: hits
+ - name: misses
+ - name: clickhouse.max_part_count_for_partition
+ description: Max part count for partition
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: max_parts_partition
+ - name: clickhouse.parts_count
+ description: Parts
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: temporary
+ - name: pre_active
+ - name: active
+ - name: deleting
+ - name: delete_on_destroy
+ - name: outdated
+ - name: wide
+ - name: compact
+            - name: clickhouse.distributed_connections
+ description: Active distributed connection
+ unit: connections
+ chart_type: line
+ dimensions:
+ - name: active
+            - name: clickhouse.distributed_connections_attempts
+ description: Distributed connection attempts
+ unit: attempts/s
+ chart_type: line
+ dimensions:
+ - name: connection
+            - name: clickhouse.distributed_connections_fail_retries
+ description: Distributed connection fails with retry
+ unit: fails/s
+ chart_type: line
+ dimensions:
+ - name: connection_retry
+            - name: clickhouse.distributed_connections_fail_exhausted_retries
+ description: Distributed connection fails after all retries finished
+ unit: fails/s
+ chart_type: line
+ dimensions:
+ - name: connection_retry_exhausted
+            - name: clickhouse.distributed_files_to_insert
+ description: Pending files to process for asynchronous insertion into Distributed tables
+ unit: files
+ chart_type: line
+ dimensions:
+ - name: pending_insertions
+            - name: clickhouse.distributed_rejected_inserts
+ description: Rejected INSERTs to a Distributed table
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: rejected
+            - name: clickhouse.distributed_delayed_inserts
+ description: Delayed INSERTs to a Distributed table
+ unit: inserts/s
+ chart_type: line
+ dimensions:
+ - name: delayed
+            - name: clickhouse.distributed_delayed_inserts_latency
+ description: Time spent while the INSERT of a block to a Distributed table was throttled
+ unit: milliseconds
+ chart_type: line
+ dimensions:
+ - name: delayed_time
+            - name: clickhouse.distributed_sync_insertion_timeout_exceeded
+ description: Distributed table sync insertions timeouts
+ unit: timeouts/s
+ chart_type: line
+ dimensions:
+ - name: sync_insertion
+            - name: clickhouse.distributed_async_insertions_failures
+ description: Distributed table async insertion failures
+ unit: failures/s
+ chart_type: line
+ dimensions:
+ - name: async_insertions
+ - name: clickhouse.uptime
+ description: Uptime
+ unit: seconds
+ chart_type: line
+ dimensions:
+ - name: uptime
+ - name: disk
+ description: These metrics refer to the Disk.
+ labels:
+ - name: disk_name
+ description: Name of the disk as defined in the [server configuration](https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree#table_engine-mergetree-multiple-volumes_configure).
+ metrics:
+ - name: clickhouse.disk_space_usage
+ description: Disk space usage
+ unit: bytes
+ chart_type: stacked
+ dimensions:
+ - name: free
+ - name: used
+ - name: table
+ description: These metrics refer to the Database Table.
+ labels:
+ - name: database
+ description: Name of the database.
+ - name: table
+ description: Name of the table.
+ metrics:
+ - name: clickhouse.database_table_size
+ description: Table size
+ unit: bytes
+ chart_type: area
+ dimensions:
+ - name: size
+ - name: clickhouse.database_table_parts
+ description: Table parts
+ unit: parts
+ chart_type: line
+ dimensions:
+ - name: parts
+ - name: clickhouse.database_table_rows
+ description: Table rows
+ unit: rows
+ chart_type: line
+ dimensions:
+ - name: rows
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.json b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.yaml b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_longest_query_time.csv b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_longest_query_time.csv
new file mode 100644
index 000000000..85119aa6f
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_longest_query_time.csv
@@ -0,0 +1,2 @@
+"value"
+"0.0738"
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_async_metrics.csv b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_async_metrics.csv
new file mode 100644
index 000000000..7c9da4f46
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_async_metrics.csv
@@ -0,0 +1,434 @@
+"metric","value"
+"AsynchronousMetricsCalculationTimeSpent",0.003263409
+"NumberOfDetachedByUserParts",0
+"NumberOfDetachedParts",0
+"AsynchronousHeavyMetricsCalculationTimeSpent",1.6e-7
+"TotalPrimaryKeyBytesInMemoryAllocated",342740
+"TotalPartsOfMergeTreeTablesSystem",26
+"TotalRowsOfMergeTreeTablesSystem",72275588
+"TotalBytesOfMergeTreeTablesSystem",66552963
+"TotalRowsOfMergeTreeTables",72275588
+"TotalBytesOfMergeTreeTables",66552963
+"NumberOfDatabases",4
+"MaxPartCountForPartition",7
+"ReplicasSumMergesInQueue",0
+"ReplicasSumInsertsInQueue",0
+"ReplicasSumQueueSize",0
+"ReplicasMaxInsertsInQueue",0
+"ReplicasMaxQueueSize",0
+"DiskUnreserved_default",165498056704
+"DiskAvailable_default",165498056704
+"DiskUsed_default",45181276160
+"FilesystemLogsPathAvailableINodes",12209636
+"FilesystemLogsPathUsedBytes",45181276160
+"FilesystemLogsPathAvailableBytes",165498056704
+"FilesystemLogsPathTotalBytes",210679332864
+"FilesystemMainPathUsedINodes",847436
+"FilesystemMainPathAvailableBytes",165498056704
+"AsynchronousHeavyMetricsUpdateInterval",120.000112
+"HashTableStatsCacheMisses",35
+"OSNiceTimeCPU2",0
+"Uptime",64380.292003818
+"FilesystemCacheBytes",0
+"OSSoftIrqTimeCPU0",0.0199978602289555
+"QueryCacheEntries",0
+"QueryCacheBytes",0
+"IndexMarkCacheBytes",0
+"OSNiceTimeCPU10",0
+"UncompressedCacheCells",0
+"UncompressedCacheBytes",0
+"IndexUncompressedCacheCells",0
+"HTTPThreads",1
+"InterserverThreads",0
+"PageCacheBytes",0
+"OSSoftIrqTimeCPU3",0
+"VMNumMaps",3034
+"BlockWriteTime_sr0",0
+"NetworkSendDrop_veth22b9458",0
+"NetworkSendBytes_veth22b9458",0
+"CPUFrequencyMHz_11",2000
+"NetworkReceiveDrop_veth22b9458",0
+"OSSystemTimeCPU13",0
+"NetworkReceiveBytes_veth22b9458",0
+"BlockWriteOps_dm-0",0
+"NetworkSendErrors_vethe1fd940",0
+"OSGuestNiceTimeCPU9",0
+"NetworkSendPackets_vethe1fd940",0
+"BlockReadMerges_dm-0",0
+"NetworkSendBytes_vethe1fd940",0
+"NetworkReceiveDrop_vethe1fd940",0
+"OSUserTimeCPU5",0
+"NetworkSendDrop_veth0cdb608",0
+"NetworkReceiveErrors_veth0cdb608",0
+"CPUFrequencyMHz_2",2000
+"OSGuestNiceTime",0
+"NetworkReceivePackets_veth0cdb608",0
+"CompiledExpressionCacheCount",5
+"NetworkSendDrop_veth8415c5c",0
+"NetworkReceiveErrors_veth8415c5c",0
+"OSUserTimeCPU9",0
+"NetworkReceivePackets_veth8415c5c",0
+"NetworkReceiveBytes_veth8415c5c",0
+"NetworkSendPackets_vethfa2b7f2",0
+"NetworkSendErrors_vethb608e1b",0
+"OSNiceTimeCPU8",0
+"NetworkReceiveDrop_vethb608e1b",0
+"OSIOWaitTimeCPU7",0
+"NetworkReceiveErrors_vethe1fd940",0
+"NetworkReceiveErrors_vethb608e1b",0
+"CompiledExpressionCacheBytes",65536
+"NetworkSendDrop_vethfa2b7f2",0
+"IndexUncompressedCacheBytes",0
+"CPUFrequencyMHz_12",2000
+"NetworkSendBytes_vethfa2b7f2",0
+"NetworkReceiveDrop_vethfa2b7f2",0
+"NetworkSendErrors_veth0cdb608",0
+"OSSystemTimeCPU7",0
+"NetworkReceivePackets_vethfa2b7f2",0
+"OSThreadsTotal",1432
+"NetworkSendBytes_docker0",0
+"NetworkReceiveDrop_docker0",0
+"OSIOWaitTimeCPU2",0
+"NetworkReceiveBytes_vethb608e1b",0
+"OSIOWaitTimeCPU10",0
+"NetworkReceiveBytes_vethfa2b7f2",0
+"jemalloc.epoch",64382
+"NetworkReceiveErrors_docker0",0
+"OSGuestNiceTimeCPU6",0
+"NetworkSendDrop_br-392a8d0e2863",0
+"OSStealTimeCPU13",0
+"NetworkSendErrors_br-392a8d0e2863",0
+"NetworkSendBytes_br-392a8d0e2863",0
+"NetworkReceiveBytes_veth0cdb608",0
+"NetworkReceiveBytes_br-392a8d0e2863",0
+"NetworkSendDrop_dummy0",0
+"jemalloc.retained",94481276928
+"NetworkSendPackets_dummy0",0
+"NetworkSendBytes_dummy0",0
+"OSStealTimeCPU3",0
+"NetworkReceiveErrors_dummy0",0
+"NetworkReceivePackets_dummy0",0
+"NetworkReceiveBytes_dummy0",0
+"NetworkReceiveBytes_vethe1fd940",0
+"OSUserTimeCPU6",0
+"NetworkSendDrop_ens18",0
+"NetworkSendErrors_ens18",0
+"OSNiceTimeNormalized",0
+"NetworkReceiveErrors_ens18",0
+"BlockQueueTime_sr0",0
+"FilesystemCacheFiles",0
+"BlockActiveTime_dm-0",0
+"BlockActiveTime_sr0",0
+"BlockInFlightOps_sr0",0
+"BlockDiscardTime_sr0",0
+"OSGuestNiceTimeCPU1",0
+"BlockReadBytes_sr0",0
+"BlockDiscardMerges_sr0",0
+"BlockReadMerges_sr0",0
+"OSSoftIrqTimeCPU1",0
+"BlockDiscardTime_dm-0",0
+"OSUserTimeCPU7",0.00999893011447775
+"BlockWriteBytes_dm-0",0
+"BlockReadBytes_dm-0",0
+"OSContextSwitches",5242
+"FilesystemMainPathAvailableINodes",12209636
+"BlockDiscardOps_dm-0",0
+"BlockWriteTime_dm-0",0
+"BlockQueueTime_sda",0
+"OSIrqTimeCPU11",0
+"BlockActiveTime_sda",0
+"OSIrqTimeCPU4",0
+"NetworkSendPackets_ens18",3
+"BlockDiscardTime_sda",0
+"Jitter",0.000107
+"BlockReadBytes_sda",0
+"BlockDiscardMerges_sda",0
+"NetworkSendErrors_veth8415c5c",0
+"BlockReadMerges_sda",0
+"NetworkSendPackets_br-392a8d0e2863",0
+"BlockDiscardOps_sda",0
+"BlockWriteOps_sda",0
+"OSSystemTimeCPU8",0
+"CPUFrequencyMHz_15",2000
+"CPUFrequencyMHz_14",2000
+"OSNiceTimeCPU9",0
+"NetworkReceivePackets_veth22b9458",0
+"BlockWriteOps_sr0",0
+"BlockWriteTime_sda",0
+"OSGuestTimeCPU6",0
+"NetworkReceiveBytes_ens18",442
+"CPUFrequencyMHz_10",2000
+"BlockReadOps_sr0",0
+"CPUFrequencyMHz_7",2000
+"CPUFrequencyMHz_13",2000
+"CPUFrequencyMHz_6",2000
+"CPUFrequencyMHz_5",2000
+"BlockDiscardOps_sr0",0
+"NetworkReceiveErrors_vethfa2b7f2",0
+"CPUFrequencyMHz_4",2000
+"CPUFrequencyMHz_3",2000
+"CPUFrequencyMHz_1",2000
+"OSNiceTimeCPU6",0
+"NetworkSendPackets_vethb608e1b",0
+"CPUFrequencyMHz_0",2000
+"OSGuestNiceTimeCPU0",0
+"OSMemoryFreePlusCached",28217331712
+"TCPThreads",1
+"OSStealTimeCPU6",0
+"OSMemoryCached",9402826752
+"OSMemoryBuffers",1473716224
+"OSMemoryAvailable",29864460288
+"MemoryDataAndStack",101636190208
+"OSUserTimeCPU11",0
+"ReplicasMaxMergesInQueue",0
+"OSGuestTimeNormalized",0
+"OSSoftIrqTimeNormalized",0.0012498662643097187
+"OSGuestNiceTimeCPU13",0
+"OSIrqTimeNormalized",0
+"OSUserTimeCPU4",0
+"OSIOWaitTimeNormalized",0
+"OSIdleTimeNormalized",0.9892691482011424
+"OSSystemTimeNormalized",0.004999465057238875
+"BlockWriteBytes_sr0",0
+"NetworkReceivePackets_docker0",0
+"OSUserTimeNormalized",0.0024997325286194375
+"OSSystemTime",0.079991440915822
+"NetworkReceivePackets_br-392a8d0e2863",0
+"OSProcessesCreated",7
+"OSInterrupts",2971
+"BlockWriteMerges_sr0",0
+"OSProcessesRunning",7
+"BlockInFlightOps_sda",0
+"CPUFrequencyMHz_9",2000
+"OSGuestTimeCPU5",0
+"OSNiceTimeCPU13",0
+"TotalPartsOfMergeTreeTables",26
+"OSGuestTimeCPU15",0
+"OSStealTimeCPU15",0
+"OSIrqTimeCPU15",0
+"OSGuestTimeCPU10",0
+"OSStealTimeCPU1",0
+"OSSystemTimeCPU15",0
+"OSNiceTimeCPU15",0
+"PageCachePinnedBytes",0
+"OSUserTimeCPU15",0
+"OSSoftIrqTimeCPU14",0
+"OSIdleTimeCPU14",0.999893011447775
+"OSIdleTimeCPU1",0.9898940813332973
+"OSSoftIrqTimeCPU13",0
+"OSStealTimeCPU12",0
+"OSIdleTimeCPU13",0.9898940813332973
+"NetworkSendBytes_veth0cdb608",0
+"MemoryResidentMax",15751659520
+"OSIdleTimeCPU15",0.999893011447775
+"OSGuestNiceTimeCPU5",0
+"OSGuestNiceTimeCPU12",0
+"OSIrqTime",0
+"OSGuestTimeCPU9",0
+"OSGuestTimeCPU12",0
+"OSSoftIrqTimeCPU12",0
+"OSIdleTime",15.828306371218279
+"OSSoftIrqTimeCPU7",0
+"OSUserTimeCPU2",0
+"OSIdleTimeCPU12",0.9898940813332973
+"OSGuestTimeCPU2",0
+"OSSystemTimeCPU12",0.00999893011447775
+"OSSystemTimeCPU10",0.00999893011447775
+"OSGuestTimeCPU11",0
+"OSStealTimeCPU11",0
+"OSSoftIrqTimeCPU11",0
+"OSIOWaitTimeCPU11",0
+"BlockQueueTime_dm-0",0
+"OSIdleTimeCPU11",0.999893011447775
+"OSIrqTimeCPU8",0
+"OSNiceTimeCPU11",0
+"OSStealTimeCPU10",0
+"NetworkReceivePackets_vethb608e1b",0
+"NetworkReceiveErrors_br-392a8d0e2863",0
+"OSNiceTimeCPU14",0
+"NetworkSendPackets_veth22b9458",0
+"OSSystemTimeCPU2",0
+"OSSoftIrqTimeCPU10",0
+"ReplicasMaxRelativeDelay",0
+"OSIrqTimeCPU10",0
+"MMapCacheCells",0
+"OSIrqTimeCPU9",0
+"NetworkReceiveDrop_veth0cdb608",0
+"OSIrqTimeCPU1",0
+"OSIrqTimeCPU14",0
+"OSIrqTimeCPU13",0
+"NetworkSendDrop_docker0",0
+"OSIdleTimeCPU10",0.9798951512188194
+"OSGuestTimeCPU14",0
+"OSUserTimeCPU13",0
+"OSIOWaitTimeCPU14",0
+"OSStealTimeCPU9",0
+"NetworkReceiveErrors_veth22b9458",0
+"NetworkReceiveDrop_dummy0",0
+"OSSoftIrqTimeCPU9",0
+"OSIOWaitTimeCPU9",0
+"OSIdleTimeCPU9",0.999893011447775
+"OSGuestTimeCPU3",0
+"OSIOWaitTimeCPU13",0
+"BlockReadTime_sr0",0
+"OSSoftIrqTimeCPU15",0
+"OSIOWaitTimeCPU12",0
+"OSGuestTimeCPU8",0
+"OSStealTimeCPU8",0
+"OSUserTimeCPU0",0
+"OSIdleTimeCPU8",0.9798951512188194
+"OSGuestNiceTimeCPU4",0
+"OSUserTimeCPU8",0.00999893011447775
+"OSSystemTimeCPU9",0
+"PrometheusThreads",1
+"NetworkSendErrors_veth22b9458",0
+"OSMemoryTotal",33652854784
+"OSGuestNiceTimeCPU7",0
+"OSStealTimeCPU7",0
+"BlockReadOps_dm-0",0
+"OSStealTimeCPU14",0
+"OSGuestNiceTimeCPU8",0
+"NetworkReceivePackets_vethe1fd940",0
+"OSIdleTimeCPU7",0.9898940813332973
+"OSSoftIrqTimeCPU8",0
+"OSNiceTimeCPU7",0
+"OSIOWaitTimeCPU8",0
+"OSIOWaitTimeCPU6",0
+"NetworkSendBytes_vethb608e1b",0
+"NetworkSendBytes_ens18",714
+"OSIdleTimeCPU3",0.9898940813332973
+"OSGuestNiceTimeCPU11",0
+"jemalloc.background_thread.num_threads",0
+"BlockReadTime_sda",0
+"OSSoftIrqTimeCPU6",0
+"OSSoftIrqTimeCPU2",0
+"OSGuestNiceTimeCPU2",0
+"OSUserTimeCPU12",0
+"LoadAverage5",0.22
+"OSGuestNiceTimeCPU10",0
+"jemalloc.background_thread.num_runs",0
+"FilesystemLogsPathUsedINodes",847436
+"OSIrqTimeCPU6",0
+"OSIrqTimeCPU7",0
+"HashTableStatsCacheHits",124
+"OSNiceTimeCPU0",0
+"CPUFrequencyMHz_8",2000
+"MarkCacheBytes",76560
+"FilesystemMainPathTotalINodes",13057072
+"NetworkSendPackets_veth0cdb608",0
+"NetworkReceiveDrop_br-392a8d0e2863",0
+"OSStealTimeNormalized",0
+"NetworkReceiveDrop_veth8415c5c",0
+"BlockDiscardBytes_sr0",0
+"OSSystemTimeCPU5",0.00999893011447775
+"OSUptime",1444727.52
+"FilesystemLogsPathTotalINodes",13057072
+"NetworkSendPackets_veth8415c5c",0
+"OSStealTimeCPU4",0
+"OSSoftIrqTimeCPU4",0
+"NetworkSendErrors_vethfa2b7f2",0
+"NetworkSendBytes_veth8415c5c",0
+"MemoryShared",333160448
+"OSGuestTimeCPU4",0
+"jemalloc.metadata",265969056
+"OSGuestNiceTimeCPU15",0
+"NumberOfTablesSystem",100
+"OSSystemTimeCPU11",0
+"OSIdleTimeCPU4",0.9898940813332973
+"IndexMarkCacheFiles",0
+"OSIOWaitTimeCPU15",0
+"OSNiceTimeCPU4",0
+"jemalloc.arenas.all.pactive",113928
+"OSUserTimeCPU1",0
+"NetworkReceivePackets_ens18",3
+"OSIrqTimeCPU3",0
+"OSIOWaitTimeCPU3",0
+"OSUserTime",0.039995720457911
+"OSIdleTimeCPU5",0.9898940813332973
+"OSOpenFiles",3008
+"OSIrqTimeCPU5",0
+"PostgreSQLThreads",0
+"OSUserTimeCPU10",0
+"TotalPrimaryKeyBytesInMemory",74202
+"OSIrqTimeCPU2",0
+"jemalloc.mapped",1036132352
+"BlockWriteMerges_dm-0",0
+"OSMemoryFreeWithoutCached",18814504960
+"DiskTotal_default",210679332864
+"OSGuestNiceTimeCPU3",0
+"OSIrqTimeCPU12",0
+"OSIOWaitTimeCPU5",0
+"OSNiceTimeCPU1",0
+"NetworkReceiveDrop_ens18",0
+"jemalloc.active",466649088
+"OSNiceTimeCPU12",0
+"OSGuestNiceTimeNormalized",0
+"OSSystemTimeCPU14",0
+"OSSoftIrqTimeCPU5",0
+"BlockReadTime_dm-0",0
+"MemoryResident",1279381504
+"BlockDiscardMerges_dm-0",0
+"OSGuestTimeCPU0",0
+"NetworkSendDrop_vethe1fd940",0
+"MemoryVirtual",102527639552
+"OSSystemTimeCPU3",0.00999893011447775
+"OSNiceTimeCPU3",0
+"FilesystemMainPathUsedBytes",45181276160
+"OSUserTimeCPU14",0
+"OSSoftIrqTime",0.0199978602289555
+"jemalloc.metadata_thp",0
+"OSStealTimeCPU0",0
+"OSSystemTimeCPU0",0
+"OSGuestNiceTimeCPU14",0
+"OSStealTimeCPU2",0
+"OSSystemTimeCPU1",0
+"BlockInFlightOps_dm-0",0
+"OSSystemTimeCPU6",0.02999679034343325
+"VMMaxMapCount",262144
+"NetworkSendPackets_docker0",0
+"OSUserTimeCPU3",0.00999893011447775
+"OSGuestTime",0
+"OSNiceTimeCPU5",0
+"OSIOWaitTimeCPU1",0
+"OSIOWaitTime",0
+"jemalloc.arenas.all.pmuzzy",0
+"NetworkSendErrors_docker0",0
+"OSNiceTime",0
+"LoadAverage15",0.24
+"OSIdleTimeCPU0",0.9898940813332973
+"MarkCacheFiles",54
+"OSIOWaitTimeCPU4",0
+"BlockWriteMerges_sda",0
+"NumberOfTables",128
+"OSIrqTimeCPU0",0
+"jemalloc.arenas.all.muzzy_purged",0
+"NetworkSendDrop_vethb608e1b",0
+"OSProcessesBlocked",0
+"jemalloc.allocated",412954608
+"NetworkSendErrors_dummy0",0
+"ReplicasMaxAbsoluteDelay",0
+"OSThreadsRunnable",8
+"OSStealTimeCPU5",0
+"LoadAverage1",0.18
+"OSGuestTimeCPU7",0
+"BlockDiscardBytes_sda",0
+"jemalloc.arenas.all.pdirty",60698
+"OSIdleTimeCPU6",0.9698962211043417
+"OSSystemTimeCPU4",0
+"OSStealTime",0
+"MemoryCode",283267072
+"AsynchronousMetricsUpdateInterval",1.000107
+"NetworkReceiveBytes_docker0",0
+"BlockDiscardBytes_dm-0",0
+"jemalloc.arenas.all.dirty_purged",363559728
+"HashTableStatsCacheEntries",9
+"MySQLThreads",0
+"FilesystemMainPathTotalBytes",210679332864
+"jemalloc.resident",960614400
+"OSGuestTimeCPU13",0
+"jemalloc.background_thread.run_intervals",0
+"BlockReadOps_sda",0
+"OSIOWaitTimeCPU0",0
+"OSIdleTimeCPU2",0.9898940813332973
+"BlockWriteBytes_sda",0
+"OSGuestTimeCPU1",0
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_disks.csv b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_disks.csv
new file mode 100644
index 000000000..42751e54e
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_disks.csv
@@ -0,0 +1,2 @@
+"name","free_space","total_space"
+"default",165494767616,210679332864
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_events.csv b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_events.csv
new file mode 100644
index 000000000..546e7e7e0
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_events.csv
@@ -0,0 +1,102 @@
+"event","value"
+"Query", 94
+"SelectQuery", 94
+"InitialQuery", 94
+"QueriesWithSubqueries", 174
+"SelectQueriesWithSubqueries", 174
+"FailedQuery", 4
+"FailedSelectQuery", 4
+"QueryTimeMicroseconds", 870752
+"SelectQueryTimeMicroseconds", 870752
+"FileOpen",1568962
+"Seek", 27585
+"ReadBufferFromFileDescriptorRead", 8578903
+"ReadBufferFromFileDescriptorReadBytes", 29425788980
+"WriteBufferFromFileDescriptorWrite", 1413953
+"WriteBufferFromFileDescriptorWriteBytes", 19290175707
+"ReadCompressedBytes", 17765217623
+"CompressedReadBufferBlocks", 22044981
+"CompressedReadBufferBytes", 639439855526
+"OpenedFileCacheHits", 138
+"OpenedFileCacheMisses",346495
+"OpenedFileCacheMicroseconds", 307462
+"IOBufferAllocs", 4887901
+"IOBufferAllocBytes", 1142983723070
+"ArenaAllocChunks", 196
+"ArenaAllocBytes", 1556480
+"FunctionExecute", 8055228
+"TableFunctionExecute", 41
+"MarkCacheHits", 229
+"MarkCacheMisses", 54
+"CreatedReadBufferOrdinary",346633
+"DiskReadElapsedMicroseconds", 64340828296
+"DiskWriteElapsedMicroseconds", 33066436
+"NetworkReceiveElapsedMicroseconds", 17548931930
+"NetworkSendElapsedMicroseconds", 3313835
+"NetworkReceiveBytes", 2121500
+"NetworkSendBytes", 5492910494
+"InsertedRows", 28386169
+"InsertedBytes", 1104809236
+"CompileFunction", 5
+"CompileExpressionsMicroseconds",134487
+"CompileExpressionsBytes", 65536
+"ExternalProcessingFilesTotal", 605
+"SelectedParts", 68
+"SelectedRanges", 68
+"SelectedMarks", 836
+"SelectedRows", 29975411
+"SelectedBytes", 1127243995
+"WaitMarksLoadMicroseconds", 2823449
+"LoadedMarksCount", 31080118
+"LoadedMarksMemoryBytes",48212328
+"Merge", 3929781
+"MergedRows", 30485884752
+"MergedUncompressedBytes", 641315961578
+"MergesTimeMilliseconds", 4586438
+"MergeTreeDataWriterRows", 28386169
+"MergeTreeDataWriterUncompressedBytes", 1104809236
+"MergeTreeDataWriterCompressedBytes", 1127957207
+"MergeTreeDataWriterBlocks", 25840
+"MergeTreeDataWriterBlocksAlreadySorted", 16671
+"MergeTreeDataWriterSortingBlocksMicroseconds",7881192
+"MergeTreeDataWriterMergingBlocksMicroseconds", 30988
+"InsertedCompactParts", 25840
+"MergedIntoWideParts", 11349
+"MergedIntoCompactParts", 3132
+"ContextLock", 1727098
+"ContextLockWaitMicroseconds", 4763
+"RWLockAcquiredReadLocks", 322474
+"RWLockReadersWaitMilliseconds", 660
+"PartsLockHoldMicroseconds", 23499086
+"PartsLockWaitMicroseconds",13947
+"RealTimeMicroseconds", 5484454350
+"UserTimeMicroseconds", 4588286964
+"SystemTimeMicroseconds", 890356522
+"MemoryAllocatorPurge", 2
+"MemoryAllocatorPurgeTimeMicroseconds", 135392
+"SoftPageFaults", 71033079
+"OSCPUWaitMicroseconds", 1028066
+"OSCPUVirtualTimeMicroseconds", 5475763006
+"OSReadBytes", 8192
+"OSWriteBytes",20749721600
+"OSReadChars", 17832770560
+"OSWriteChars", 17947151360
+"QueryProfilerRuns", 7153
+"ThreadPoolReaderPageCacheHit", 428
+"ThreadPoolReaderPageCacheHitBytes", 8568656
+"ThreadPoolReaderPageCacheHitElapsedMicroseconds", 11635
+"SynchronousReadWaitMicroseconds", 11965
+"MainConfigLoads", 1
+"AggregationOptimizedEqualRangesOfKeys", 17
+"ServerStartupMilliseconds",463
+"AsyncLoaderWaitMicroseconds", 65377
+"LogTrace", 285068
+"LogDebug", 171106
+"LogInfo", 47
+"LogWarning", 2
+"LogError", 52
+"InterfaceHTTPSendBytes", 37853
+"InterfaceHTTPReceiveBytes", 72018
+"InterfaceNativeReceiveBytes", 11646
+"InterfacePrometheusSendBytes",3817434
+"InterfacePrometheusReceiveBytes", 2037836
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_metrics.csv b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_metrics.csv
new file mode 100644
index 000000000..d5ecc29a7
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_metrics.csv
@@ -0,0 +1,283 @@
+"metric","value"
+"Query",1
+"Merge",0
+"Move",0
+"PartMutation",0
+"ReplicatedFetch",0
+"ReplicatedSend",0
+"ReplicatedChecks",0
+"BackgroundMergesAndMutationsPoolTask",0
+"BackgroundMergesAndMutationsPoolSize",64
+"BackgroundFetchesPoolTask",0
+"BackgroundFetchesPoolSize",32
+"BackgroundCommonPoolTask",0
+"BackgroundCommonPoolSize",16
+"BackgroundMovePoolTask",0
+"BackgroundMovePoolSize",16
+"BackgroundSchedulePoolTask",0
+"BackgroundSchedulePoolSize",512
+"BackgroundBufferFlushSchedulePoolTask",0
+"BackgroundBufferFlushSchedulePoolSize",16
+"BackgroundDistributedSchedulePoolTask",0
+"BackgroundDistributedSchedulePoolSize",16
+"BackgroundMessageBrokerSchedulePoolTask",0
+"BackgroundMessageBrokerSchedulePoolSize",16
+"CacheDictionaryUpdateQueueBatches",0
+"CacheDictionaryUpdateQueueKeys",0
+"DiskSpaceReservedForMerge",0
+"DistributedSend",0
+"QueryPreempted",0
+"TCPConnection",1
+"MySQLConnection",0
+"HTTPConnection",0
+"InterserverConnection",0
+"PostgreSQLConnection",0
+"OpenFileForRead",14
+"OpenFileForWrite",0
+"TotalTemporaryFiles",0
+"TemporaryFilesForSort",0
+"TemporaryFilesForAggregation",0
+"TemporaryFilesForJoin",0
+"TemporaryFilesUnknown",0
+"Read",2
+"RemoteRead",0
+"Write",0
+"NetworkReceive",0
+"NetworkSend",0
+"SendScalars",0
+"SendExternalTables",0
+"QueryThread",0
+"ReadonlyReplica",0
+"MemoryTracking",1270999152
+"MergesMutationsMemoryTracking",0
+"EphemeralNode",0
+"ZooKeeperSession",0
+"ZooKeeperWatch",0
+"ZooKeeperRequest",0
+"DelayedInserts",0
+"ContextLockWait",0
+"StorageBufferRows",0
+"StorageBufferBytes",0
+"DictCacheRequests",0
+"Revision",54486
+"VersionInteger",24005001
+"RWLockWaitingReaders",0
+"RWLockWaitingWriters",0
+"RWLockActiveReaders",1
+"RWLockActiveWriters",0
+"GlobalThread",714
+"GlobalThreadActive",651
+"GlobalThreadScheduled",651
+"LocalThread",0
+"LocalThreadActive",0
+"LocalThreadScheduled",0
+"MergeTreeDataSelectExecutorThreads",0
+"MergeTreeDataSelectExecutorThreadsActive",0
+"MergeTreeDataSelectExecutorThreadsScheduled",0
+"BackupsThreads",0
+"BackupsThreadsActive",0
+"BackupsThreadsScheduled",0
+"RestoreThreads",0
+"RestoreThreadsActive",0
+"RestoreThreadsScheduled",0
+"MarksLoaderThreads",0
+"MarksLoaderThreadsActive",0
+"MarksLoaderThreadsScheduled",0
+"IOPrefetchThreads",0
+"IOPrefetchThreadsActive",0
+"IOPrefetchThreadsScheduled",0
+"IOWriterThreads",0
+"IOWriterThreadsActive",0
+"IOWriterThreadsScheduled",0
+"IOThreads",0
+"IOThreadsActive",0
+"IOThreadsScheduled",0
+"ThreadPoolRemoteFSReaderThreads",0
+"ThreadPoolRemoteFSReaderThreadsActive",0
+"ThreadPoolRemoteFSReaderThreadsScheduled",0
+"ThreadPoolFSReaderThreads",0
+"ThreadPoolFSReaderThreadsActive",0
+"ThreadPoolFSReaderThreadsScheduled",0
+"BackupsIOThreads",0
+"BackupsIOThreadsActive",0
+"BackupsIOThreadsScheduled",0
+"DiskObjectStorageAsyncThreads",0
+"DiskObjectStorageAsyncThreadsActive",0
+"StorageHiveThreads",0
+"StorageHiveThreadsActive",0
+"StorageHiveThreadsScheduled",0
+"TablesLoaderBackgroundThreads",0
+"TablesLoaderBackgroundThreadsActive",0
+"TablesLoaderBackgroundThreadsScheduled",0
+"TablesLoaderForegroundThreads",0
+"TablesLoaderForegroundThreadsActive",0
+"TablesLoaderForegroundThreadsScheduled",0
+"DatabaseOnDiskThreads",0
+"DatabaseOnDiskThreadsActive",0
+"DatabaseOnDiskThreadsScheduled",0
+"DatabaseCatalogThreads",0
+"DatabaseCatalogThreadsActive",0
+"DatabaseCatalogThreadsScheduled",0
+"DestroyAggregatesThreads",0
+"DestroyAggregatesThreadsActive",0
+"DestroyAggregatesThreadsScheduled",0
+"HashedDictionaryThreads",0
+"HashedDictionaryThreadsActive",0
+"HashedDictionaryThreadsScheduled",0
+"CacheDictionaryThreads",0
+"CacheDictionaryThreadsActive",0
+"CacheDictionaryThreadsScheduled",0
+"ParallelFormattingOutputFormatThreads",0
+"ParallelFormattingOutputFormatThreadsActive",0
+"ParallelFormattingOutputFormatThreadsScheduled",0
+"ParallelParsingInputFormatThreads",0
+"ParallelParsingInputFormatThreadsActive",0
+"ParallelParsingInputFormatThreadsScheduled",0
+"MergeTreeBackgroundExecutorThreads",48
+"MergeTreeBackgroundExecutorThreadsActive",48
+"MergeTreeBackgroundExecutorThreadsScheduled",48
+"AsynchronousInsertThreads",0
+"AsynchronousInsertThreadsActive",0
+"AsynchronousInsertThreadsScheduled",0
+"AsynchronousInsertQueueSize",0
+"AsynchronousInsertQueueBytes",0
+"StartupSystemTablesThreads",0
+"StartupSystemTablesThreadsActive",0
+"StartupSystemTablesThreadsScheduled",0
+"AggregatorThreads",0
+"AggregatorThreadsActive",0
+"AggregatorThreadsScheduled",0
+"DDLWorkerThreads",0
+"DDLWorkerThreadsActive",0
+"DDLWorkerThreadsScheduled",0
+"StorageDistributedThreads",0
+"StorageDistributedThreadsActive",0
+"StorageDistributedThreadsScheduled",0
+"DistributedInsertThreads",0
+"DistributedInsertThreadsActive",0
+"DistributedInsertThreadsScheduled",0
+"StorageS3Threads",0
+"StorageS3ThreadsActive",0
+"StorageS3ThreadsScheduled",0
+"ObjectStorageS3Threads",0
+"ObjectStorageS3ThreadsActive",0
+"ObjectStorageS3ThreadsScheduled",0
+"ObjectStorageAzureThreads",0
+"ObjectStorageAzureThreadsActive",0
+"ObjectStorageAzureThreadsScheduled",0
+"MergeTreePartsLoaderThreads",0
+"MergeTreePartsLoaderThreadsActive",0
+"MergeTreePartsLoaderThreadsScheduled",0
+"MergeTreeOutdatedPartsLoaderThreads",0
+"MergeTreeOutdatedPartsLoaderThreadsActive",0
+"MergeTreeOutdatedPartsLoaderThreadsScheduled",0
+"MergeTreeUnexpectedPartsLoaderThreads",0
+"MergeTreeUnexpectedPartsLoaderThreadsActive",0
+"MergeTreeUnexpectedPartsLoaderThreadsScheduled",0
+"MergeTreePartsCleanerThreads",0
+"MergeTreePartsCleanerThreadsActive",0
+"MergeTreePartsCleanerThreadsScheduled",0
+"DatabaseReplicatedCreateTablesThreads",0
+"DatabaseReplicatedCreateTablesThreadsActive",0
+"DatabaseReplicatedCreateTablesThreadsScheduled",0
+"IDiskCopierThreads",0
+"IDiskCopierThreadsActive",0
+"IDiskCopierThreadsScheduled",0
+"SystemReplicasThreads",0
+"SystemReplicasThreadsActive",0
+"SystemReplicasThreadsScheduled",0
+"RestartReplicaThreads",0
+"RestartReplicaThreadsActive",0
+"RestartReplicaThreadsScheduled",0
+"QueryPipelineExecutorThreads",0
+"QueryPipelineExecutorThreadsActive",0
+"QueryPipelineExecutorThreadsScheduled",0
+"ParquetDecoderThreads",0
+"ParquetDecoderThreadsActive",0
+"ParquetDecoderThreadsScheduled",0
+"ParquetEncoderThreads",0
+"ParquetEncoderThreadsActive",0
+"ParquetEncoderThreadsScheduled",0
+"DWARFReaderThreads",0
+"DWARFReaderThreadsActive",0
+"DWARFReaderThreadsScheduled",0
+"OutdatedPartsLoadingThreads",0
+"OutdatedPartsLoadingThreadsActive",0
+"OutdatedPartsLoadingThreadsScheduled",0
+"DistributedBytesToInsert",0
+"BrokenDistributedBytesToInsert",0
+"DistributedFilesToInsert",0
+"BrokenDistributedFilesToInsert",0
+"TablesToDropQueueSize",0
+"MaxDDLEntryID",0
+"MaxPushedDDLEntryID",0
+"PartsTemporary",0
+"PartsPreCommitted",0
+"PartsCommitted",25
+"PartsPreActive",0
+"PartsActive",25
+"AttachedDatabase",5
+"AttachedTable",128
+"PartsOutdated",284
+"PartsDeleting",0
+"PartsDeleteOnDestroy",0
+"PartsWide",76
+"PartsCompact",233
+"MMappedFiles",2
+"MMappedFileBytes",498055184
+"AsynchronousReadWait",0
+"PendingAsyncInsert",0
+"KafkaConsumers",0
+"KafkaConsumersWithAssignment",0
+"KafkaProducers",0
+"KafkaLibrdkafkaThreads",0
+"KafkaBackgroundReads",0
+"KafkaConsumersInUse",0
+"KafkaWrites",0
+"KafkaAssignedPartitions",0
+"FilesystemCacheReadBuffers",0
+"CacheFileSegments",0
+"CacheDetachedFileSegments",0
+"FilesystemCacheSize",0
+"FilesystemCacheSizeLimit",0
+"FilesystemCacheElements",0
+"FilesystemCacheDownloadQueueElements",0
+"FilesystemCacheDelayedCleanupElements",0
+"FilesystemCacheHoldFileSegments",0
+"AsyncInsertCacheSize",0
+"S3Requests",0
+"KeeperAliveConnections",0
+"KeeperOutstandingRequets",0
+"ThreadsInOvercommitTracker",0
+"IOUringPendingEvents",0
+"IOUringInFlightEvents",0
+"ReadTaskRequestsSent",0
+"MergeTreeReadTaskRequestsSent",0
+"MergeTreeAllRangesAnnouncementsSent",0
+"CreatedTimersInQueryProfiler",166
+"ActiveTimersInQueryProfiler",2
+"RefreshableViews",0
+"RefreshingViews",0
+"StorageBufferFlushThreads",0
+"StorageBufferFlushThreadsActive",0
+"StorageBufferFlushThreadsScheduled",0
+"SharedMergeTreeThreads",0
+"SharedMergeTreeThreadsActive",0
+"SharedMergeTreeThreadsScheduled",0
+"SharedMergeTreeFetch",0
+"CacheWarmerBytesInProgress",0
+"DistrCacheOpenedConnections",0
+"DistrCacheUsedConnections",0
+"DistrCacheReadRequests",0
+"DistrCacheWriteRequests",0
+"DistrCacheServerConnections",0
+"StorageConnectionsStored",0
+"StorageConnectionsTotal",0
+"DiskConnectionsStored",0
+"DiskConnectionsTotal",0
+"HTTPConnectionsStored",0
+"HTTPConnectionsTotal",0
+"AddressesActive",0
+"AddressesBanned",0
+"FilteringMarksWithPrimaryKey",0
+"FilteringMarksWithSecondaryKeys",0
diff --git a/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_parts.csv b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_parts.csv
new file mode 100644
index 000000000..6ade3324a
--- /dev/null
+++ b/src/go/collectors/go.d.plugin/modules/clickhouse/testdata/resp_system_parts.csv
@@ -0,0 +1,6 @@
+"database","table","bytes","parts","rows"
+"system","processors_profile_log",391629,5,20107
+"system","metric_log",18302533,6,162718
+"system","query_log",196403,5,761
+"system","asynchronous_metric_log",19113663,6,70377261
+"system","trace_log",28695023,8,1733076